xref: /titanic_41/usr/src/uts/i86pc/io/pcplusmp/apic_common.c (revision ba7866cd2cbdf574f47d4e38a1301b90744dd677)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 
26 /*
27  * PSMI 1.1 extensions are supported only in 2.6 and later versions.
28  * PSMI 1.2 extensions are supported only in 2.7 and later versions.
29  * PSMI 1.3 and 1.4 extensions are supported in Solaris 10.
30  * PSMI 1.5 extensions are supported in Solaris Nevada.
31  * PSMI 1.6 extensions are supported in Solaris Nevada.
32  * PSMI 1.7 extensions are supported in Solaris Nevada.
33  */
34 #define	PSMI_1_7
35 
36 #include <sys/processor.h>
37 #include <sys/time.h>
38 #include <sys/psm.h>
39 #include <sys/smp_impldefs.h>
40 #include <sys/cram.h>
41 #include <sys/acpi/acpi.h>
42 #include <sys/acpica.h>
43 #include <sys/psm_common.h>
44 #include <sys/apic.h>
45 #include <sys/pit.h>
46 #include <sys/ddi.h>
47 #include <sys/sunddi.h>
48 #include <sys/ddi_impldefs.h>
49 #include <sys/pci.h>
50 #include <sys/promif.h>
51 #include <sys/x86_archext.h>
52 #include <sys/cpc_impl.h>
53 #include <sys/uadmin.h>
54 #include <sys/panic.h>
55 #include <sys/debug.h>
56 #include <sys/archsystm.h>
57 #include <sys/trap.h>
58 #include <sys/machsystm.h>
59 #include <sys/sysmacros.h>
60 #include <sys/cpuvar.h>
61 #include <sys/rm_platter.h>
62 #include <sys/privregs.h>
63 #include <sys/note.h>
64 #include <sys/pci_intr_lib.h>
65 #include <sys/spl.h>
66 #include <sys/clock.h>
67 #include <sys/dditypes.h>
68 #include <sys/sunddi.h>
69 #include <sys/x_call.h>
70 #include <sys/reboot.h>
71 #include <sys/hpet.h>
72 #include <sys/apic_common.h>
73 
/* Interrupt-remapping record callbacks installed in apic_nointrmap_ops below. */
static void	apic_record_ioapic_rdt(void *intrmap_private,
		    ioapic_rdt_t *irdt);
static void	apic_record_msi(void *intrmap_private, msi_regs_t *mregs);

/*
 * Common routines between pcplusmp & apix (taken from apic.c).
 */

int	apic_clkinit(int);
hrtime_t apic_gethrtime(void);
void	apic_send_ipi(int, int);
void	apic_set_idlecpu(processorid_t);
void	apic_unset_idlecpu(processorid_t);
void	apic_shutdown(int, int);
void	apic_preshutdown(int, int);
processorid_t	apic_get_next_processorid(processorid_t);
void	apic_timer_reprogram(hrtime_t);
void	apic_timer_enable(void);
void	apic_timer_disable(void);

hrtime_t apic_gettime();

/*
 * IOAPIC EOI method selected by apic_ioapic_method_probe(); defaults to
 * "fall back to the pcplusmp module" until the probe runs.
 */
enum apic_ioapic_method_type apix_mul_ioapic_method = APIC_MUL_IOAPIC_PCPLUSMP;
97 
int	apic_oneshot = 0;
int	apic_oneshot_enable = 1; /* to allow disabling one-shot capability */

/* Now the ones for Dynamic Interrupt distribution */
int	apic_enable_dynamic_migration = 0;

/* maximum loop count when sending Start IPIs. */
int apic_sipi_max_loop_count = 0x1000;

/*
 * These variables are frequently accessed in apic_intr_enter(),
 * apic_intr_exit and apic_setspl, so group them together
 */
volatile uint32_t *apicadr =  NULL;	/* virtual addr of local APIC	*/
int apic_setspl_delay = 1;		/* apic_setspl - delay enable	*/
int apic_clkvect;			/* clock interrupt vector */

/* vector at which error interrupts come in */
int apic_errvect;
int apic_enable_error_intr = 1;
/* initial loop count (x10us) in apic_error_intr; doubled up to 500 */
int apic_error_display_delay = 100;

/* vector at which performance counter overflow interrupts come in */
int apic_cpcovf_vect;
int apic_enable_cpcovf_intr = 1;

/* vector at which CMCI interrupts come in */
int apic_cmci_vect;
extern int cmi_enable_cmci;
extern void cmi_cmci_trap(void);

kmutex_t cmci_cpu_setup_lock;	/* protects cmci_cpu_setup_registered */
int cmci_cpu_setup_registered;

/* number of CPUs in power-on transition state */
static int apic_poweron_cnt = 0;
lock_t apic_mode_switch_lock;

/*
 * Patchable global variables.
 */
int	apic_forceload = 0;

int	apic_coarse_hrtime = 1;		/* 0 - use accurate slow gethrtime() */

int	apic_flat_model = 0;		/* 0 - clustered. 1 - flat */
int	apic_panic_on_nmi = 0;
int	apic_panic_on_apic_error = 0;

int	apic_verbose = 0;	/* 0x1ff */

/* minimum number of timer ticks to program to */
int apic_min_timer_ticks = 1;

#ifdef DEBUG
int	apic_debug = 0;
int	apic_restrict_vector = 0;

int	apic_debug_msgbuf[APIC_DEBUG_MSGBUFSIZE];
int	apic_debug_msgbufindex = 0;

#endif /* DEBUG */

uint_t apic_nsec_per_intr = 0;

uint_t apic_nticks = 0;
uint_t apic_skipped_redistribute = 0;

/* last APIC timer count read; clamped in apic_gethrtime() */
uint_t last_count_read = 0;
lock_t	apic_gethrtime_lock;
/*
 * Sequence stamp around hrtime updates: readers spin while the value is
 * odd and retry if it changed (see apic_gettime()/apic_gethrtime()).
 */
volatile int	apic_hrtime_stamp = 0;
volatile hrtime_t apic_nsec_since_boot = 0;
uint_t apic_hertz_count;	/* APIC timer ticks per clock interrupt */

uint64_t apic_ticks_per_SFnsecs;	/* # of ticks in SF nsecs */

static hrtime_t apic_nsec_max;

static	hrtime_t	apic_last_hrtime = 0;	/* last value returned */
int		apic_hrtime_error = 0;
int		apic_remote_hrterr = 0;
int		apic_num_nmis = 0;
int		apic_apic_error = 0;
int		apic_num_apic_errors = 0;
int		apic_num_cksum_errors = 0;

int	apic_error = 0;

static	int	apic_cmos_ssb_set = 0;

/* use to make sure only one cpu handles the nmi */
lock_t	apic_nmi_lock;
/* use to make sure only one cpu handles the error interrupt */
lock_t	apic_error_lock;
192 
/*
 * Canned BMC (IPMI) command sequences used at shutdown time to arm, then
 * reset, the service-processor watchdog so it powers the box down.
 * NOTE(review): the consumer is not visible in this chunk — presumably
 * apic_shutdown(); confirm before relying on this.  aspen_bmc drives a
 * control/data style interface; sitka_bmc writes explicit SMS ports.
 */
static	struct {
	uchar_t	cntl;
	uchar_t	data;
} aspen_bmc[] = {
	{ CC_SMS_WR_START,	0x18 },		/* NetFn/LUN */
	{ CC_SMS_WR_NEXT,	0x24 },		/* Cmd SET_WATCHDOG_TIMER */
	{ CC_SMS_WR_NEXT,	0x84 },		/* DataByte 1: SMS/OS no log */
	{ CC_SMS_WR_NEXT,	0x2 },		/* DataByte 2: Power Down */
	{ CC_SMS_WR_NEXT,	0x0 },		/* DataByte 3: no pre-timeout */
	{ CC_SMS_WR_NEXT,	0x0 },		/* DataByte 4: timer expir. */
	{ CC_SMS_WR_NEXT,	0xa },		/* DataByte 5: init countdown */
	{ CC_SMS_WR_END,	0x0 },		/* DataByte 6: init countdown */

	{ CC_SMS_WR_START,	0x18 },		/* NetFn/LUN */
	{ CC_SMS_WR_END,	0x22 }		/* Cmd RESET_WATCHDOG_TIMER */
};

static	struct {
	int	port;
	uchar_t	data;
} sitka_bmc[] = {
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_START },
	{ SMS_DATA_REGISTER,	0x18 },		/* NetFn/LUN */
	{ SMS_DATA_REGISTER,	0x24 },		/* Cmd SET_WATCHDOG_TIMER */
	{ SMS_DATA_REGISTER,	0x84 },		/* DataByte 1: SMS/OS no log */
	{ SMS_DATA_REGISTER,	0x2 },		/* DataByte 2: Power Down */
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 3: no pre-timeout */
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 4: timer expir. */
	{ SMS_DATA_REGISTER,	0xa },		/* DataByte 5: init countdown */
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_END },
	{ SMS_DATA_REGISTER,	0x0 },		/* DataByte 6: init countdown */

	{ SMS_COMMAND_REGISTER,	SMS_WRITE_START },
	{ SMS_DATA_REGISTER,	0x18 },		/* NetFn/LUN */
	{ SMS_COMMAND_REGISTER,	SMS_WRITE_END },
	{ SMS_DATA_REGISTER,	0x22 }		/* Cmd RESET_WATCHDOG_TIMER */
};
230 
/* Patchable global variables. */
int		apic_kmdb_on_nmi = 0;		/* 0 - no, 1 - yes enter kmdb */
uint32_t	apic_divide_reg_init = 0;	/* 0 - divide by 2 */

/* default apic ops without interrupt remapping */
static apic_intrmap_ops_t apic_nointrmap_ops = {
	(int (*)(int))return_instr,
	(void (*)(int))return_instr,
	(void (*)(void **, dev_info_t *, uint16_t, int, uchar_t))return_instr,
	(void (*)(void *, void *, uint16_t, int))return_instr,
	(void (*)(void **))return_instr,
	apic_record_ioapic_rdt,
	apic_record_msi,
};

/* active intr-remapping ops; replaced when an IOMMU takes over (elsewhere) */
apic_intrmap_ops_t *apic_vt_ops = &apic_nointrmap_ops;
apic_cpus_info_t	*apic_cpus = NULL;	/* per-CPU APIC state array */
cpuset_t	apic_cpumask;			/* set of known CPU slots */
uint_t		apic_picinit_called;

/* Flag to indicate that we need to shut down all processors */
static uint_t	apic_shutdown_processors;
253 
254 /*
255  * Probe the ioapic method for apix module. Called in apic_probe_common()
256  */
257 int
258 apic_ioapic_method_probe()
259 {
260 	if (apix_enable == 0)
261 		return (PSM_SUCCESS);
262 
263 	/*
264 	 * Set IOAPIC EOI handling method. The priority from low to high is:
265 	 * 	1. IOxAPIC: with EOI register
266 	 * 	2. IOMMU interrupt mapping
267 	 *	3. Mask-Before-EOI method for systems without boot
268 	 *	interrupt routing, such as systems with only one IOAPIC;
269 	 *	NVIDIA CK8-04/MCP55 systems; systems with bridge solution
270 	 *	which disables the boot interrupt routing already.
271 	 * 	4. Directed EOI
272 	 */
273 	if (apic_io_ver[0] >= 0x20)
274 		apix_mul_ioapic_method = APIC_MUL_IOAPIC_IOXAPIC;
275 	if ((apic_io_max == 1) || (apic_nvidia_io_max == apic_io_max))
276 		apix_mul_ioapic_method = APIC_MUL_IOAPIC_MASK;
277 	if (apic_directed_EOI_supported())
278 		apix_mul_ioapic_method = APIC_MUL_IOAPIC_DEOI;
279 
280 	/* fall back to pcplusmp */
281 	if (apix_mul_ioapic_method == APIC_MUL_IOAPIC_PCPLUSMP) {
282 		/* make sure apix is after pcplusmp in /etc/mach */
283 		apix_enable = 0; /* go ahead with pcplusmp install next */
284 		return (PSM_FAILURE);
285 	}
286 
287 	return (PSM_SUCCESS);
288 }
289 
290 /*
291  * handler for APIC Error interrupt. Just print a warning and continue
292  */
293 int
294 apic_error_intr()
295 {
296 	uint_t	error0, error1, error;
297 	uint_t	i;
298 
299 	/*
300 	 * We need to write before read as per 7.4.17 of system prog manual.
301 	 * We do both and or the results to be safe
302 	 */
303 	error0 = apic_reg_ops->apic_read(APIC_ERROR_STATUS);
304 	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
305 	error1 = apic_reg_ops->apic_read(APIC_ERROR_STATUS);
306 	error = error0 | error1;
307 
308 	/*
309 	 * Clear the APIC error status (do this on all cpus that enter here)
310 	 * (two writes are required due to the semantics of accessing the
311 	 * error status register.)
312 	 */
313 	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
314 	apic_reg_ops->apic_write(APIC_ERROR_STATUS, 0);
315 
316 	/*
317 	 * Prevent more than 1 CPU from handling error interrupt causing
318 	 * double printing (interleave of characters from multiple
319 	 * CPU's when using prom_printf)
320 	 */
321 	if (lock_try(&apic_error_lock) == 0)
322 		return (error ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
323 	if (error) {
324 #if	DEBUG
325 		if (apic_debug)
326 			debug_enter("pcplusmp: APIC Error interrupt received");
327 #endif /* DEBUG */
328 		if (apic_panic_on_apic_error)
329 			cmn_err(CE_PANIC,
330 			    "APIC Error interrupt on CPU %d. Status = %x",
331 			    psm_get_cpu_id(), error);
332 		else {
333 			if ((error & ~APIC_CS_ERRORS) == 0) {
334 				/* cksum error only */
335 				apic_error |= APIC_ERR_APIC_ERROR;
336 				apic_apic_error |= error;
337 				apic_num_apic_errors++;
338 				apic_num_cksum_errors++;
339 			} else {
340 				/*
341 				 * prom_printf is the best shot we have of
342 				 * something which is problem free from
343 				 * high level/NMI type of interrupts
344 				 */
345 				prom_printf("APIC Error interrupt on CPU %d. "
346 				    "Status 0 = %x, Status 1 = %x\n",
347 				    psm_get_cpu_id(), error0, error1);
348 				apic_error |= APIC_ERR_APIC_ERROR;
349 				apic_apic_error |= error;
350 				apic_num_apic_errors++;
351 				for (i = 0; i < apic_error_display_delay; i++) {
352 					tenmicrosec();
353 				}
354 				/*
355 				 * provide more delay next time limited to
356 				 * roughly 1 clock tick time
357 				 */
358 				if (apic_error_display_delay < 500)
359 					apic_error_display_delay *= 2;
360 			}
361 		}
362 		lock_clear(&apic_error_lock);
363 		return (DDI_INTR_CLAIMED);
364 	} else {
365 		lock_clear(&apic_error_lock);
366 		return (DDI_INTR_UNCLAIMED);
367 	}
368 }
369 
370 /*
371  * Turn off the mask bit in the performance counter Local Vector Table entry.
372  */
373 void
374 apic_cpcovf_mask_clear(void)
375 {
376 	apic_reg_ops->apic_write(APIC_PCINT_VECT,
377 	    (apic_reg_ops->apic_read(APIC_PCINT_VECT) & ~APIC_LVT_MASK));
378 }
379 
/*
 * Cross-call target: program this CPU's CMCI LVT entry with the CMCI
 * vector (unmasked).  Arguments are unused; always returns 0.
 */
/*ARGSUSED*/
static int
apic_cmci_enable(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect);
	return (0);
}
387 
/*
 * Cross-call target: mask this CPU's CMCI LVT entry (vector left in place,
 * AV_MASK set).  Arguments are unused; always returns 0.
 */
/*ARGSUSED*/
static int
apic_cmci_disable(xc_arg_t arg1, xc_arg_t arg2, xc_arg_t arg3)
{
	apic_reg_ops->apic_write(APIC_CMCI_VECT, apic_cmci_vect | AV_MASK);
	return (0);
}
395 
396 /*ARGSUSED*/
397 int
398 cmci_cpu_setup(cpu_setup_t what, int cpuid, void *arg)
399 {
400 	cpuset_t	cpu_set;
401 
402 	CPUSET_ONLY(cpu_set, cpuid);
403 
404 	switch (what) {
405 		case CPU_ON:
406 			xc_call(NULL, NULL, NULL, CPUSET2BV(cpu_set),
407 			    (xc_func_t)apic_cmci_enable);
408 			break;
409 
410 		case CPU_OFF:
411 			xc_call(NULL, NULL, NULL, CPUSET2BV(cpu_set),
412 			    (xc_func_t)apic_cmci_disable);
413 			break;
414 
415 		default:
416 			break;
417 	}
418 
419 	return (0);
420 }
421 
422 static void
423 apic_disable_local_apic(void)
424 {
425 	apic_reg_ops->apic_write_task_reg(APIC_MASK_ALL);
426 	apic_reg_ops->apic_write(APIC_LOCAL_TIMER, AV_MASK);
427 
428 	/* local intr reg 0 */
429 	apic_reg_ops->apic_write(APIC_INT_VECT0, AV_MASK);
430 
431 	/* disable NMI */
432 	apic_reg_ops->apic_write(APIC_INT_VECT1, AV_MASK);
433 
434 	/* and error interrupt */
435 	apic_reg_ops->apic_write(APIC_ERR_VECT, AV_MASK);
436 
437 	/* and perf counter intr */
438 	apic_reg_ops->apic_write(APIC_PCINT_VECT, AV_MASK);
439 
440 	apic_reg_ops->apic_write(APIC_SPUR_INT_REG, APIC_SPUR_INTR);
441 }
442 
/*
 * Drive the INIT/SIPI protocol at CPU 'cpun': an INIT IPI assert/deassert
 * pair followed, for integrated local APICs, by two Startup IPIs whose
 * vector encodes the page number of the real-mode platter code.  With
 * start == B_FALSE the INIT sequence alone leaves the target CPU in the
 * wait-for-SIPI (effectively stopped) state — see apic_cpu_stop().
 */
static void
apic_cpu_send_SIPI(processorid_t cpun, boolean_t start)
{
	int		loop_count;
	uint32_t	vector;
	uint_t		apicid;
	ulong_t		iflag;

	apicid =  apic_cpus[cpun].aci_local_id;

	/*
	 * Interrupts on current CPU will be disabled during the
	 * steps in order to avoid unwanted side effects from
	 * executing interrupt handlers on a problematic BIOS.
	 */
	iflag = intr_clear();

	if (start) {
		/* tell the BIOS warm-reset path to jump to the platter code */
		outb(CMOS_ADDR, SSB);
		outb(CMOS_DATA, BIOS_SHUTDOWN);
	}

	/*
	 * According to X2APIC specification in section '2.3.5.1' of
	 * Interrupt Command Register Semantics, the semantics of
	 * programming the Interrupt Command Register to dispatch an interrupt
	 * is simplified. A single MSR write to the 64-bit ICR is required
	 * for dispatching an interrupt. Specifically, with the 64-bit MSR
	 * interface to ICR, system software is not required to check the
	 * status of the delivery status bit prior to writing to the ICR
	 * to send an IPI. With the removal of the Delivery Status bit,
	 * system software no longer has a reason to read the ICR. It remains
	 * readable only to aid in debugging.
	 */
#ifdef	DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC) {
		APIC_AV_PENDING_SET();
	}
#endif /* DEBUG */

	/* for integrated - make sure there is one INIT IPI in buffer */
	/* for external - it will wake up the cpu */
	apic_reg_ops->apic_write_int_cmd(apicid, AV_ASSERT | AV_RESET);

	/* If only 1 CPU is installed, PENDING bit will not go low */
	for (loop_count = apic_sipi_max_loop_count; loop_count; loop_count--) {
		if (apic_mode == LOCAL_APIC &&
		    apic_reg_ops->apic_read(APIC_INT_CMD1) & AV_PENDING)
			apic_ret();
		else
			break;
	}

	apic_reg_ops->apic_write_int_cmd(apicid, AV_DEASSERT | AV_RESET);
	drv_usecwait(20000);		/* 20 milli sec */

	if (apic_cpus[cpun].aci_local_ver >= APIC_INTEGRATED_VERS) {
		/* integrated apic */

		/* SIPI vector = physical page number of the rm_platter */
		vector = (rm_platter_pa >> MMU_PAGESHIFT) &
		    (APIC_VECTOR_MASK | APIC_IPL_MASK);

		/* to offset the INIT IPI queue up in the buffer */
		apic_reg_ops->apic_write_int_cmd(apicid, vector | AV_STARTUP);
		drv_usecwait(200);		/* 20 micro sec */

		/*
		 * send the second SIPI (Startup IPI) as recommended by Intel
		 * software development manual.
		 */
		apic_reg_ops->apic_write_int_cmd(apicid, vector | AV_STARTUP);
		drv_usecwait(200);	/* 20 micro sec */
	}

	intr_restore(iflag);
}
521 
522 /*ARGSUSED1*/
523 int
524 apic_cpu_start(processorid_t cpun, caddr_t arg)
525 {
526 	ASSERT(MUTEX_HELD(&cpu_lock));
527 
528 	if (!apic_cpu_in_range(cpun)) {
529 		return (EINVAL);
530 	}
531 
532 	/*
533 	 * Switch to apic_common_send_ipi for safety during starting other CPUs.
534 	 */
535 	if (apic_mode == LOCAL_X2APIC) {
536 		apic_switch_ipi_callback(B_TRUE);
537 	}
538 
539 	apic_cmos_ssb_set = 1;
540 	apic_cpu_send_SIPI(cpun, B_TRUE);
541 
542 	return (0);
543 }
544 
/*
 * Put CPU into halted state with interrupts disabled.
 *
 * The target must already be offline, quiesced and interrupt-disabled
 * (asserted below).  Cross calls are drained before the CPU is parked via
 * an INIT IPI (apic_cpu_send_SIPI with start == B_FALSE).  Returns 0 on
 * success, EINVAL/ENOTSUP for bad targets, or the xc_flush_cpu() errno.
 */
/*ARGSUSED1*/
int
apic_cpu_stop(processorid_t cpun, caddr_t arg)
{
	int		rc;
	cpu_t 		*cp;
	extern cpuset_t cpu_ready_set;
	extern void cpu_idle_intercept_cpu(cpu_t *cp);

	ASSERT(MUTEX_HELD(&cpu_lock));

	if (!apic_cpu_in_range(cpun)) {
		return (EINVAL);
	}
	/* external (non-integrated) APICs cannot be parked this way */
	if (apic_cpus[cpun].aci_local_ver < APIC_INTEGRATED_VERS) {
		return (ENOTSUP);
	}

	cp = cpu_get(cpun);
	ASSERT(cp != NULL);
	ASSERT((cp->cpu_flags & CPU_OFFLINE) != 0);
	ASSERT((cp->cpu_flags & CPU_QUIESCED) != 0);
	ASSERT((cp->cpu_flags & CPU_ENABLE) == 0);

	/* Clear CPU_READY flag to disable cross calls. */
	cp->cpu_flags &= ~CPU_READY;
	CPUSET_ATOMIC_DEL(cpu_ready_set, cpun);
	rc = xc_flush_cpu(cp);
	if (rc != 0) {
		/* roll back so the CPU remains usable on failure */
		CPUSET_ATOMIC_ADD(cpu_ready_set, cpun);
		cp->cpu_flags |= CPU_READY;
		return (rc);
	}

	/* Intercept target CPU at a safe point before powering it off. */
	cpu_idle_intercept_cpu(cp);

	apic_cpu_send_SIPI(cpun, B_FALSE);
	cp->cpu_flags &= ~CPU_RUNNING;

	return (0);
}
590 
591 int
592 apic_cpu_ops(psm_cpu_request_t *reqp)
593 {
594 	if (reqp == NULL) {
595 		return (EINVAL);
596 	}
597 
598 	switch (reqp->pcr_cmd) {
599 	case PSM_CPU_ADD:
600 		return (apic_cpu_add(reqp));
601 
602 	case PSM_CPU_REMOVE:
603 		return (apic_cpu_remove(reqp));
604 
605 	case PSM_CPU_STOP:
606 		return (apic_cpu_stop(reqp->req.cpu_stop.cpuid,
607 		    reqp->req.cpu_stop.ctx));
608 
609 	default:
610 		return (ENOTSUP);
611 	}
612 }
613 
/* DEBUG-only tunables for interrupt stretching/breakpoints. */
#ifdef	DEBUG
int	apic_break_on_cpu = 9;
int	apic_stretch_interrupts = 0;
int	apic_stretch_ISR = 1 << 3;	/* IPL of 3 matches nothing now */
#endif /* DEBUG */
619 
620 /*
621  * generates an interprocessor interrupt to another CPU. Any changes made to
622  * this routine must be accompanied by similar changes to
623  * apic_common_send_ipi().
624  */
625 void
626 apic_send_ipi(int cpun, int ipl)
627 {
628 	int vector;
629 	ulong_t flag;
630 
631 	vector = apic_resv_vector[ipl];
632 
633 	ASSERT((vector >= APIC_BASE_VECT) && (vector <= APIC_SPUR_INTR));
634 
635 	flag = intr_clear();
636 
637 	APIC_AV_PENDING_SET();
638 
639 	apic_reg_ops->apic_write_int_cmd(apic_cpus[cpun].aci_local_id,
640 	    vector);
641 
642 	intr_restore(flag);
643 }
644 
645 
/* PSM idle-CPU hook; intentionally a no-op for this machine module. */
/*ARGSUSED*/
void
apic_set_idlecpu(processorid_t cpun)
{
}
651 
/* PSM un-idle-CPU hook; intentionally a no-op for this machine module. */
/*ARGSUSED*/
void
apic_unset_idlecpu(processorid_t cpun)
{
}
657 
658 
/* Empty function used as a spin-loop body (a call the compiler keeps). */
void
apic_ret()
{
}
663 
/*
 * If apic_coarse_hrtime == 1, then apic_gettime() is used instead of
 * apic_gethrtime().  This is used for performance instead of accuracy.
 *
 * Returns apic_nsec_since_boot sampled consistently: apic_hrtime_stamp is
 * a sequence counter — odd while the clock interrupt is updating the time,
 * and changed afterwards — so we spin while odd and retry on change.
 */

hrtime_t
apic_gettime()
{
	int old_hrtime_stamp;
	hrtime_t temp;

	/*
	 * In one-shot mode, we do not keep time, so if anyone
	 * calls psm_gettime() directly, we vector over to
	 * gethrtime().
	 * one-shot mode MUST NOT be enabled if this psm is the source of
	 * hrtime.
	 */

	if (apic_oneshot)
		return (gethrtime());


gettime_again:
	while ((old_hrtime_stamp = apic_hrtime_stamp) & 1)
		apic_ret();

	temp = apic_nsec_since_boot;

	if (apic_hrtime_stamp != old_hrtime_stamp) {	/* got an interrupt */
		goto gettime_again;
	}
	return (temp);
}
698 
/*
 * Here we return the number of nanoseconds since booting.  Note every
 * clock interrupt increments apic_nsec_since_boot by the appropriate
 * amount.
 *
 * The sub-tick part is derived from CPU 0's APIC timer count: read
 * directly when running on CPU 0, otherwise fetched with an APIC remote
 * read.  Guaranteed monotonic: never returns less than apic_last_hrtime.
 */
hrtime_t
apic_gethrtime(void)
{
	int curr_timeval, countval, elapsed_ticks;
	int old_hrtime_stamp, status;
	hrtime_t temp;
	uint32_t cpun;
	ulong_t oflags;

	/*
	 * In one-shot mode, we do not keep time, so if anyone
	 * calls psm_gethrtime() directly, we vector over to
	 * gethrtime().
	 * one-shot mode MUST NOT be enabled if this psm is the source of
	 * hrtime.
	 */

	if (apic_oneshot)
		return (gethrtime());

	oflags = intr_clear();	/* prevent migration */

	cpun = apic_reg_ops->apic_read(APIC_LID_REG);
	if (apic_mode == LOCAL_APIC)
		cpun >>= APIC_ID_BIT_OFFSET;

	lock_set(&apic_gethrtime_lock);

gethrtime_again:
	/* wait out an in-progress time update (odd stamp), then sample */
	while ((old_hrtime_stamp = apic_hrtime_stamp) & 1)
		apic_ret();

	/*
	 * Check to see which CPU we are on.  Note the time is kept on
	 * the local APIC of CPU 0.  If on CPU 0, simply read the current
	 * counter.  If on another CPU, issue a remote read command to CPU 0.
	 */
	if (cpun == apic_cpus[0].aci_local_id) {
		countval = apic_reg_ops->apic_read(APIC_CURR_COUNT);
	} else {
#ifdef	DEBUG
		APIC_AV_PENDING_SET();
#else
		if (apic_mode == LOCAL_APIC)
			APIC_AV_PENDING_SET();
#endif /* DEBUG */

		apic_reg_ops->apic_write_int_cmd(
		    apic_cpus[0].aci_local_id, APIC_CURR_ADD | AV_REMOTE);

		while ((status = apic_reg_ops->apic_read(APIC_INT_CMD1))
		    & AV_READ_PENDING) {
			apic_ret();
		}

		if (status & AV_REMOTE_STATUS)	/* 1 = valid */
			countval = apic_reg_ops->apic_read(APIC_REMOTE_READ);
		else {	/* 0 = invalid */
			apic_remote_hrterr++;
			/*
			 * return last hrtime right now, will need more
			 * testing if change to retry
			 */
			temp = apic_last_hrtime;

			lock_clear(&apic_gethrtime_lock);

			intr_restore(oflags);

			return (temp);
		}
	}
	/* count-down timer: a value above the last read means it wrapped */
	if (countval > last_count_read)
		countval = 0;
	else
		last_count_read = countval;

	elapsed_ticks = apic_hertz_count - countval;

	curr_timeval = APIC_TICKS_TO_NSECS(elapsed_ticks);
	temp = apic_nsec_since_boot + curr_timeval;

	if (apic_hrtime_stamp != old_hrtime_stamp) {	/* got an interrupt */
		/* we might have clobbered last_count_read. Restore it */
		last_count_read = apic_hertz_count;
		goto gethrtime_again;
	}

	if (temp < apic_last_hrtime) {
		/* return last hrtime if error occurs */
		apic_hrtime_error++;
		temp = apic_last_hrtime;
	}
	else
		apic_last_hrtime = temp;

	lock_clear(&apic_gethrtime_lock);
	intr_restore(oflags);

	return (temp);
}
805 
/*
 * apic NMI handler.  During an all-processor shutdown it quiesces the local
 * APIC; otherwise a single CPU (guarded by apic_nmi_lock) enters kmdb,
 * panics, or just logs, depending on the apic_kmdb_on_nmi/apic_panic_on_nmi
 * patchables.
 */
/*ARGSUSED*/
void
apic_nmi_intr(caddr_t arg, struct regs *rp)
{
	if (apic_shutdown_processors) {
		apic_disable_local_apic();
		return;
	}

	apic_error |= APIC_ERR_NMI;

	/* only one CPU reports; the rest drop out silently */
	if (!lock_try(&apic_nmi_lock))
		return;
	apic_num_nmis++;

	if (apic_kmdb_on_nmi && psm_debugger()) {
		debug_enter("NMI received: entering kmdb\n");
	} else if (apic_panic_on_nmi) {
		/* Keep panic from entering kmdb. */
		nopanicdebug = 1;
		panic("NMI received\n");
	} else {
		/*
		 * prom_printf is the best shot we have of something which is
		 * problem free from high level/NMI type of interrupts
		 */
		prom_printf("NMI received\n");
	}

	lock_clear(&apic_nmi_lock);
}
838 
839 processorid_t
840 apic_get_next_processorid(processorid_t cpu_id)
841 {
842 
843 	int i;
844 
845 	if (cpu_id == -1)
846 		return ((processorid_t)0);
847 
848 	for (i = cpu_id + 1; i < NCPU; i++) {
849 		if (apic_cpu_in_range(i))
850 			return (i);
851 	}
852 
853 	return ((processorid_t)-1);
854 }
855 
856 int
857 apic_cpu_add(psm_cpu_request_t *reqp)
858 {
859 	int i, rv = 0;
860 	ulong_t iflag;
861 	boolean_t first = B_TRUE;
862 	uchar_t localver;
863 	uint32_t localid, procid;
864 	processorid_t cpuid = (processorid_t)-1;
865 	mach_cpu_add_arg_t *ap;
866 
867 	ASSERT(reqp != NULL);
868 	reqp->req.cpu_add.cpuid = (processorid_t)-1;
869 
870 	/* Check whether CPU hotplug is supported. */
871 	if (!plat_dr_support_cpu() || apic_max_nproc == -1) {
872 		return (ENOTSUP);
873 	}
874 
875 	ap = (mach_cpu_add_arg_t *)reqp->req.cpu_add.argp;
876 	switch (ap->type) {
877 	case MACH_CPU_ARG_LOCAL_APIC:
878 		localid = ap->arg.apic.apic_id;
879 		procid = ap->arg.apic.proc_id;
880 		if (localid >= 255 || procid > 255) {
881 			cmn_err(CE_WARN,
882 			    "!apic: apicid(%u) or procid(%u) is invalid.",
883 			    localid, procid);
884 			return (EINVAL);
885 		}
886 		break;
887 
888 	case MACH_CPU_ARG_LOCAL_X2APIC:
889 		localid = ap->arg.apic.apic_id;
890 		procid = ap->arg.apic.proc_id;
891 		if (localid >= UINT32_MAX) {
892 			cmn_err(CE_WARN,
893 			    "!apic: x2apicid(%u) is invalid.", localid);
894 			return (EINVAL);
895 		} else if (localid >= 255 && apic_mode == LOCAL_APIC) {
896 			cmn_err(CE_WARN, "!apic: system is in APIC mode, "
897 			    "can't support x2APIC processor.");
898 			return (ENOTSUP);
899 		}
900 		break;
901 
902 	default:
903 		cmn_err(CE_WARN,
904 		    "!apic: unknown argument type %d to apic_cpu_add().",
905 		    ap->type);
906 		return (EINVAL);
907 	}
908 
909 	/* Use apic_ioapic_lock to sync with apic_get_next_bind_cpu. */
910 	iflag = intr_clear();
911 	lock_set(&apic_ioapic_lock);
912 
913 	/* Check whether local APIC id already exists. */
914 	for (i = 0; i < apic_nproc; i++) {
915 		if (!CPU_IN_SET(apic_cpumask, i))
916 			continue;
917 		if (apic_cpus[i].aci_local_id == localid) {
918 			lock_clear(&apic_ioapic_lock);
919 			intr_restore(iflag);
920 			cmn_err(CE_WARN,
921 			    "!apic: local apic id %u already exists.",
922 			    localid);
923 			return (EEXIST);
924 		} else if (apic_cpus[i].aci_processor_id == procid) {
925 			lock_clear(&apic_ioapic_lock);
926 			intr_restore(iflag);
927 			cmn_err(CE_WARN,
928 			    "!apic: processor id %u already exists.",
929 			    (int)procid);
930 			return (EEXIST);
931 		}
932 
933 		/*
934 		 * There's no local APIC version number available in MADT table,
935 		 * so assume that all CPUs are homogeneous and use local APIC
936 		 * version number of the first existing CPU.
937 		 */
938 		if (first) {
939 			first = B_FALSE;
940 			localver = apic_cpus[i].aci_local_ver;
941 		}
942 	}
943 	ASSERT(first == B_FALSE);
944 
945 	/*
946 	 * Try to assign the same cpuid if APIC id exists in the dirty cache.
947 	 */
948 	for (i = 0; i < apic_max_nproc; i++) {
949 		if (CPU_IN_SET(apic_cpumask, i)) {
950 			ASSERT((apic_cpus[i].aci_status & APIC_CPU_FREE) == 0);
951 			continue;
952 		}
953 		ASSERT(apic_cpus[i].aci_status & APIC_CPU_FREE);
954 		if ((apic_cpus[i].aci_status & APIC_CPU_DIRTY) &&
955 		    apic_cpus[i].aci_local_id == localid &&
956 		    apic_cpus[i].aci_processor_id == procid) {
957 			cpuid = i;
958 			break;
959 		}
960 	}
961 
962 	/* Avoid the dirty cache and allocate fresh slot if possible. */
963 	if (cpuid == (processorid_t)-1) {
964 		for (i = 0; i < apic_max_nproc; i++) {
965 			if ((apic_cpus[i].aci_status & APIC_CPU_FREE) &&
966 			    (apic_cpus[i].aci_status & APIC_CPU_DIRTY) == 0) {
967 				cpuid = i;
968 				break;
969 			}
970 		}
971 	}
972 
973 	/* Try to find any free slot as last resort. */
974 	if (cpuid == (processorid_t)-1) {
975 		for (i = 0; i < apic_max_nproc; i++) {
976 			if (apic_cpus[i].aci_status & APIC_CPU_FREE) {
977 				cpuid = i;
978 				break;
979 			}
980 		}
981 	}
982 
983 	if (cpuid == (processorid_t)-1) {
984 		lock_clear(&apic_ioapic_lock);
985 		intr_restore(iflag);
986 		cmn_err(CE_NOTE,
987 		    "!apic: failed to allocate cpu id for processor %u.",
988 		    procid);
989 		rv = EAGAIN;
990 	} else if (ACPI_FAILURE(acpica_map_cpu(cpuid, procid))) {
991 		lock_clear(&apic_ioapic_lock);
992 		intr_restore(iflag);
993 		cmn_err(CE_NOTE,
994 		    "!apic: failed to build mapping for processor %u.",
995 		    procid);
996 		rv = EBUSY;
997 	} else {
998 		ASSERT(cpuid >= 0 && cpuid < NCPU);
999 		ASSERT(cpuid < apic_max_nproc && cpuid < max_ncpus);
1000 		bzero(&apic_cpus[cpuid], sizeof (apic_cpus[0]));
1001 		apic_cpus[cpuid].aci_processor_id = procid;
1002 		apic_cpus[cpuid].aci_local_id = localid;
1003 		apic_cpus[cpuid].aci_local_ver = localver;
1004 		CPUSET_ATOMIC_ADD(apic_cpumask, cpuid);
1005 		if (cpuid >= apic_nproc) {
1006 			apic_nproc = cpuid + 1;
1007 		}
1008 		lock_clear(&apic_ioapic_lock);
1009 		intr_restore(iflag);
1010 		reqp->req.cpu_add.cpuid = cpuid;
1011 	}
1012 
1013 	return (rv);
1014 }
1015 
/*
 * Hot-remove a CPU: tear down its ACPI mapping, drop it from apic_cpumask
 * (shrinking apic_nproc if it was the highest id) and park its apic_cpus
 * slot as FREE|DIRTY so apic_cpu_add() can later re-assign the same cpuid.
 * Returns 0, ENOTSUP (no hotplug support), ENODEV (unknown cpuid) or
 * ENOENT (ACPI unmap failed).
 */
int
apic_cpu_remove(psm_cpu_request_t *reqp)
{
	int i;
	ulong_t iflag;
	processorid_t cpuid;

	/* Check whether CPU hotplug is supported. */
	if (!plat_dr_support_cpu() || apic_max_nproc == -1) {
		return (ENOTSUP);
	}

	cpuid = reqp->req.cpu_remove.cpuid;

	/* Use apic_ioapic_lock to sync with apic_get_next_bind_cpu. */
	iflag = intr_clear();
	lock_set(&apic_ioapic_lock);

	if (!apic_cpu_in_range(cpuid)) {
		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);
		cmn_err(CE_WARN,
		    "!apic: cpuid %d doesn't exist in apic_cpus array.",
		    cpuid);
		return (ENODEV);
	}
	ASSERT((apic_cpus[cpuid].aci_status & APIC_CPU_FREE) == 0);

	if (ACPI_FAILURE(acpica_unmap_cpu(cpuid))) {
		lock_clear(&apic_ioapic_lock);
		intr_restore(iflag);
		return (ENOENT);
	}

	if (cpuid == apic_nproc - 1) {
		/*
		 * We are removing the highest numbered cpuid so we need to
		 * find the next highest cpuid as the new value for apic_nproc.
		 */
		for (i = apic_nproc; i > 0; i--) {
			if (CPU_IN_SET(apic_cpumask, i - 1)) {
				apic_nproc = i;
				break;
			}
		}
		/* at least one CPU left */
		ASSERT(i > 0);
	}
	CPUSET_ATOMIC_DEL(apic_cpumask, cpuid);
	/* mark slot as free and keep it in the dirty cache */
	apic_cpus[cpuid].aci_status = APIC_CPU_FREE | APIC_CPU_DIRTY;

	lock_clear(&apic_ioapic_lock);
	intr_restore(iflag);

	return (0);
}
1073 
/*
 * Return the number of APIC clock ticks elapsed for 8254 to decrement
 * (APIC_TIME_COUNT + pit_ticks_adj) ticks.
 *
 * 'addr' is the (mapped) local APIC base to calibrate against; the actual
 * over/undershoot of the PIT wait is returned through *pit_ticks_adj so
 * the caller can correct the ratio.
 */
static uint_t
apic_calibrate(volatile uint32_t *addr, uint16_t *pit_ticks_adj)
{
	uint8_t		pit_tick_lo;
	uint16_t	pit_tick, target_pit_tick;
	uint32_t	start_apic_tick, end_apic_tick;
	ulong_t		iflag;
	uint32_t	reg;

	/*
	 * Current-count register offset expressed relative to 'addr'
	 * (pointer difference is in uint32_t units); equals APIC_CURR_COUNT
	 * when addr == apicadr.
	 */
	reg = addr + APIC_CURR_COUNT - apicadr;

	iflag = intr_clear();

	do {
		/* latch low byte then high byte of PIT counter 0 */
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick < APIC_TIME_MIN ||
	    pit_tick_lo <= APIC_LB_MIN || pit_tick_lo >= APIC_LB_MAX);

	/*
	 * Wait for the 8254 to decrement by 5 ticks to ensure
	 * we didn't start in the middle of a tick.
	 * Compare with 0x10 for the wrap around case.
	 */
	target_pit_tick = pit_tick - 5;
	do {
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick > target_pit_tick || pit_tick_lo < 0x10);

	start_apic_tick = apic_reg_ops->apic_read(reg);

	/*
	 * Wait for the 8254 to decrement by
	 * (APIC_TIME_COUNT + pit_ticks_adj) ticks
	 */
	target_pit_tick = pit_tick - APIC_TIME_COUNT;
	do {
		pit_tick_lo = inb(PITCTR0_PORT);
		pit_tick = (inb(PITCTR0_PORT) << 8) | pit_tick_lo;
	} while (pit_tick > target_pit_tick || pit_tick_lo < 0x10);

	end_apic_tick = apic_reg_ops->apic_read(reg);

	*pit_ticks_adj = target_pit_tick - pit_tick;

	intr_restore(iflag);

	/* APIC timer counts down, so start - end is the elapsed ticks */
	return (start_apic_tick - end_apic_tick);
}
1128 
1129 /*
1130  * Initialise the APIC timer on the local APIC of CPU 0 to the desired
1131  * frequency.  Note at this stage in the boot sequence, the boot processor
1132  * is the only active processor.
1133  * hertz value of 0 indicates a one-shot mode request.  In this case
1134  * the function returns the resolution (in nanoseconds) for the hardware
1135  * timer interrupt.  If one-shot mode capability is not available,
1136  * the return value will be 0. apic_enable_oneshot is a global switch
1137  * for disabling the functionality.
1138  * A non-zero positive value for hertz indicates a periodic mode request.
1139  * In this case the hardware will be programmed to generate clock interrupts
1140  * at hertz frequency and returns the resolution of interrupts in
1141  * nanosecond.
1142  */
1143 
int
apic_clkinit(int hertz)
{
	uint_t		apic_ticks = 0;
	uint_t		pit_ticks;
	int		ret;
	uint16_t	pit_ticks_adj;
	static int	firsttime = 1;

	if (firsttime) {
		/* first time calibrate on CPU0 only */

		/* Program the divider, then count down from the maximum. */
		apic_reg_ops->apic_write(APIC_DIVIDE_REG, apic_divide_reg_init);
		apic_reg_ops->apic_write(APIC_INIT_COUNT, APIC_MAXVAL);
		apic_ticks = apic_calibrate(apicadr, &pit_ticks_adj);

		/* total number of PIT ticks corresponding to apic_ticks */
		pit_ticks = APIC_TIME_COUNT + pit_ticks_adj;

		/*
		 * Determine the number of nanoseconds per APIC clock tick
		 * and then determine how many APIC ticks to interrupt at the
		 * desired frequency
		 * apic_ticks / (pitticks / PIT_HZ) = apic_ticks_per_s
		 * (apic_ticks * PIT_HZ) / pitticks = apic_ticks_per_s
		 * apic_ticks_per_ns = (apic_ticks * PIT_HZ) / (pitticks * 10^9)
		 * pic_ticks_per_SFns =
		 *   (SF * apic_ticks * PIT_HZ) / (pitticks * 10^9)
		 */
		apic_ticks_per_SFnsecs =
		    ((SF * apic_ticks * PIT_HZ) /
		    ((uint64_t)pit_ticks * NANOSEC));

		/* the interval timer initial count is 32 bit max */
		apic_nsec_max = APIC_TICKS_TO_NSECS(APIC_MAXVAL);
		firsttime = 0;
	}

	if (hertz != 0) {
		/* periodic */
		apic_nsec_per_intr = NANOSEC / hertz;
		apic_hertz_count = APIC_NSECS_TO_TICKS(apic_nsec_per_intr);
	}

	/* Scale the redistribution thresholds by the sample factor (%). */
	apic_int_busy_mark = (apic_int_busy_mark *
	    apic_sample_factor_redistribution) / 100;
	apic_int_free_mark = (apic_int_free_mark *
	    apic_sample_factor_redistribution) / 100;
	apic_diff_for_redistribution = (apic_diff_for_redistribution *
	    apic_sample_factor_redistribution) / 100;

	if (hertz == 0) {
		/* requested one_shot */
		/* One-shot requires TSC hrtime and the global enable switch. */
		if (!tsc_gethrtime_enable || !apic_oneshot_enable)
			return (0);
		apic_oneshot = 1;
		ret = (int)APIC_TICKS_TO_NSECS(1);
	} else {
		/* program the local APIC to interrupt at the given frequency */
		apic_reg_ops->apic_write(APIC_INIT_COUNT, apic_hertz_count);
		apic_reg_ops->apic_write(APIC_LOCAL_TIMER,
		    (apic_clkvect + APIC_BASE_VECT) | AV_TIME);
		apic_oneshot = 0;
		ret = NANOSEC / hertz;
	}

	return (ret);

}
1213 
1214 /*
1215  * apic_preshutdown:
1216  * Called early in shutdown whilst we can still access filesystems to do
1217  * things like loading modules which will be required to complete shutdown
1218  * after filesystems are all unmounted.
1219  */
void
apic_preshutdown(int cmd, int fcn)
{
	/* Nothing to do here currently beyond optional verbose logging. */
	APIC_VERBOSE_POWEROFF(("apic_preshutdown(%d,%d); m=%d a=%d\n",
	    cmd, fcn, apic_poweroff_method, apic_enable_acpi));
}
1226 
/*
 * apic_shutdown:
 * Quiesce interrupt delivery for shutdown: notify the other CPUs via
 * NMI, restore legacy PIC routing and, for the A_SHUTDOWN/AD_POWEROFF
 * case, attempt the configured platform power-off method.
 */
void
apic_shutdown(int cmd, int fcn)
{
	int restarts, attempts;
	int i;
	uchar_t	byte;
	ulong_t iflag;

	/* Tear down HPET/ACPI timer state first. */
	hpet_acpi_fini();

	/* Send NMI to all CPUs except self to do per processor shutdown */
	iflag = intr_clear();
#ifdef	DEBUG
	APIC_AV_PENDING_SET();
#else
	if (apic_mode == LOCAL_APIC)
		APIC_AV_PENDING_SET();
#endif /* DEBUG */
	apic_shutdown_processors = 1;
	apic_reg_ops->apic_write(APIC_INT_CMD1,
	    AV_NMI | AV_LEVEL | AV_SH_ALL_EXCSELF);

	/* restore cmos shutdown byte before reboot */
	if (apic_cmos_ssb_set) {
		outb(CMOS_ADDR, SSB);
		outb(CMOS_DATA, 0);
	}

	ioapic_disable_redirection();

	/*	disable apic mode if imcr present	*/
	if (apic_imcrp) {
		outb(APIC_IMCR_P1, (uchar_t)APIC_IMCR_SELECT);
		outb(APIC_IMCR_P2, (uchar_t)APIC_IMCR_PIC);
	}

	apic_disable_local_apic();

	intr_restore(iflag);

	/* remainder of function is for shutdown cases only */
	if (cmd != A_SHUTDOWN)
		return;

	/*
	 * Switch system back into Legacy-Mode if using ACPI and
	 * not powering-off.  Some BIOSes need to remain in ACPI-mode
	 * for power-off to succeed (Dell Dimension 4600)
	 * Do not disable ACPI while doing fastreboot
	 */
	if (apic_enable_acpi && fcn != AD_POWEROFF && fcn != AD_FASTREBOOT)
		(void) AcpiDisable();

	if (fcn == AD_FASTREBOOT) {
		/* Send a reset IPI to all other CPUs for fast reboot. */
		apic_reg_ops->apic_write(APIC_INT_CMD1,
		    AV_ASSERT | AV_RESET | AV_SH_ALL_EXCSELF);
	}

	/* remainder of function is for shutdown+poweroff case only */
	if (fcn != AD_POWEROFF)
		return;

	switch (apic_poweroff_method) {
		case APIC_POWEROFF_VIA_RTC:

			/* select the extended NVRAM bank in the RTC */
			outb(CMOS_ADDR, RTC_REGA);
			byte = inb(CMOS_DATA);
			outb(CMOS_DATA, (byte | EXT_BANK));

			outb(CMOS_ADDR, PFR_REG);

			/* for Predator must toggle the PAB bit */
			byte = inb(CMOS_DATA);

			/*
			 * clear power active bar, wakeup alarm and
			 * kickstart
			 */
			byte &= ~(PAB_CBIT | WF_FLAG | KS_FLAG);
			outb(CMOS_DATA, byte);

			/* delay before next write */
			drv_usecwait(1000);

			/* for S40 the following would suffice */
			byte = inb(CMOS_DATA);

			/* power active bar control bit */
			byte |= PAB_CBIT;
			outb(CMOS_DATA, byte);

			break;

		case APIC_POWEROFF_VIA_ASPEN_BMC:
			restarts = 0;
restart_aspen_bmc:
			/* Give up after a few failed command sequences. */
			if (++restarts == 3)
				break;
			attempts = 0;
			/* Wait for the BMC interface to go idle. */
			do {
				byte = inb(MISMIC_FLAG_REGISTER);
				byte &= MISMIC_BUSY_MASK;
				if (byte != 0) {
					drv_usecwait(1000);
					if (attempts >= 3)
						goto restart_aspen_bmc;
					++attempts;
				}
			} while (byte != 0);
			outb(MISMIC_CNTL_REGISTER, CC_SMS_GET_STATUS);
			byte = inb(MISMIC_FLAG_REGISTER);
			byte |= 0x1;
			outb(MISMIC_FLAG_REGISTER, byte);
			i = 0;
			/* Write out the canned power-off command sequence. */
			for (; i < (sizeof (aspen_bmc)/sizeof (aspen_bmc[0]));
			    i++) {
				attempts = 0;
				/* Wait for idle before each command byte. */
				do {
					byte = inb(MISMIC_FLAG_REGISTER);
					byte &= MISMIC_BUSY_MASK;
					if (byte != 0) {
						drv_usecwait(1000);
						if (attempts >= 3)
							goto restart_aspen_bmc;
						++attempts;
					}
				} while (byte != 0);
				outb(MISMIC_CNTL_REGISTER, aspen_bmc[i].cntl);
				outb(MISMIC_DATA_REGISTER, aspen_bmc[i].data);
				byte = inb(MISMIC_FLAG_REGISTER);
				byte |= 0x1;
				outb(MISMIC_FLAG_REGISTER, byte);
			}
			break;

		case APIC_POWEROFF_VIA_SITKA_BMC:
			restarts = 0;
restart_sitka_bmc:
			/* Give up after a few failed command sequences. */
			if (++restarts == 3)
				break;
			attempts = 0;
			/* Wait until the SMS interface is not mid-transfer. */
			do {
				byte = inb(SMS_STATUS_REGISTER);
				byte &= SMS_STATE_MASK;
				if ((byte == SMS_READ_STATE) ||
				    (byte == SMS_WRITE_STATE)) {
					drv_usecwait(1000);
					if (attempts >= 3)
						goto restart_sitka_bmc;
					++attempts;
				}
			} while ((byte == SMS_READ_STATE) ||
			    (byte == SMS_WRITE_STATE));
			outb(SMS_COMMAND_REGISTER, SMS_GET_STATUS);
			i = 0;
			for (; i < (sizeof (sitka_bmc)/sizeof (sitka_bmc[0]));
			    i++) {
				attempts = 0;
				/* Wait for the input buffer to drain. */
				do {
					byte = inb(SMS_STATUS_REGISTER);
					byte &= SMS_IBF_MASK;
					if (byte != 0) {
						drv_usecwait(1000);
						if (attempts >= 3)
							goto restart_sitka_bmc;
						++attempts;
					}
				} while (byte != 0);
				outb(sitka_bmc[i].port, sitka_bmc[i].data);
			}
			break;

		case APIC_POWEROFF_NONE:

			/* If no APIC direct method, we will try using ACPI */
			if (apic_enable_acpi) {
				if (acpi_poweroff() == 1)
					return;
			} else
				return;

			break;
	}
	/*
	 * Wait a limited time here for power to go off.
	 * If the power does not go off, then there was a
	 * problem and we should continue to the halt which
	 * prints a message for the user to press a key to
	 * reboot.
	 */
	drv_usecwait(7000000); /* wait seven seconds */

}
1421 
1422 /*
1423  * This function will reprogram the timer.
1424  *
1425  * When in oneshot mode the argument is the absolute time in future to
1426  * generate the interrupt at.
1427  *
1428  * When in periodic mode, the argument is the interval at which the
1429  * interrupts should be generated. There is no need to support the periodic
1430  * mode timer change at this time.
1431  */
void
apic_timer_reprogram(hrtime_t time)
{
	hrtime_t now;
	uint_t ticks;
	int64_t delta;

	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */

	if (!apic_oneshot) {
		/* time is the interval for periodic mode */
		ticks = APIC_NSECS_TO_TICKS(time);
	} else {
		/* one shot mode */

		now = gethrtime();
		delta = time - now;

		if (delta <= 0) {
			/*
			 * requested to generate an interrupt in the past
			 * generate an interrupt as soon as possible
			 */
			ticks = apic_min_timer_ticks;
		} else if (delta > apic_nsec_max) {
			/*
			 * requested to generate an interrupt at a time
			 * further than what we are capable of. Set to max
			 * the hardware can handle
			 */

			ticks = APIC_MAXVAL;
#ifdef DEBUG
			cmn_err(CE_CONT, "apic_timer_reprogram, request at"
			    "  %lld  too far in future, current time"
			    "  %lld \n", time, now);
#endif
		} else
			ticks = APIC_NSECS_TO_TICKS(delta);
	}

	/* Enforce the minimum tick count the driver will program. */
	if (ticks < apic_min_timer_ticks)
		ticks = apic_min_timer_ticks;

	/* Program the timer's initial count. */
	apic_reg_ops->apic_write(APIC_INIT_COUNT, ticks);
}
1481 
1482 /*
1483  * This function will enable timer interrupts.
1484  */
1485 void
1486 apic_timer_enable(void)
1487 {
1488 	/*
1489 	 * We should be Called from high PIL context (CBE_HIGH_PIL),
1490 	 * so kpreempt is disabled.
1491 	 */
1492 
1493 	if (!apic_oneshot) {
1494 		apic_reg_ops->apic_write(APIC_LOCAL_TIMER,
1495 		    (apic_clkvect + APIC_BASE_VECT) | AV_TIME);
1496 	} else {
1497 		/* one shot */
1498 		apic_reg_ops->apic_write(APIC_LOCAL_TIMER,
1499 		    (apic_clkvect + APIC_BASE_VECT));
1500 	}
1501 }
1502 
1503 /*
1504  * This function will disable timer interrupts.
1505  */
1506 void
1507 apic_timer_disable(void)
1508 {
1509 	/*
1510 	 * We should be Called from high PIL context (CBE_HIGH_PIL),
1511 	 * so kpreempt is disabled.
1512 	 */
1513 	apic_reg_ops->apic_write(APIC_LOCAL_TIMER,
1514 	    (apic_clkvect + APIC_BASE_VECT) | AV_MASK);
1515 }
1516 
1517 /*
1518  * Set timer far into the future and return timer
1519  * current Count in nanoseconds.
1520  */
1521 hrtime_t
1522 apic_timer_stop_count(void)
1523 {
1524 	hrtime_t	ns_val;
1525 	int		enable_val, count_val;
1526 
1527 	/*
1528 	 * Should be called with interrupts disabled.
1529 	 */
1530 	ASSERT(!interrupts_enabled());
1531 
1532 	enable_val = apic_reg_ops->apic_read(APIC_LOCAL_TIMER);
1533 	if ((enable_val & AV_MASK) == AV_MASK)
1534 		return ((hrtime_t)-1);		/* timer is disabled */
1535 
1536 	count_val = apic_reg_ops->apic_read(APIC_CURR_COUNT);
1537 	ns_val = APIC_TICKS_TO_NSECS(count_val);
1538 
1539 	apic_reg_ops->apic_write(APIC_INIT_COUNT, APIC_MAXVAL);
1540 
1541 	return (ns_val);
1542 }
1543 
1544 /*
1545  * Reprogram timer after Deep C-State.
1546  */
1547 void
1548 apic_timer_restart(hrtime_t time)
1549 {
1550 	apic_timer_reprogram(time);
1551 }
1552 
1553 ddi_periodic_t apic_periodic_id;
1554 
1555 /*
1556  * The following functions are in the platform specific file so that they
1557  * can be different functions depending on whether we are running on
1558  * bare metal or a hypervisor.
1559  */
1560 
1561 /*
1562  * map an apic for memory-mapped access
1563  */
1564 uint32_t *
1565 mapin_apic(uint32_t addr, size_t len, int flags)
1566 {
1567 	return ((void *)psm_map_phys(addr, len, flags));
1568 }
1569 
/*
 * Map an I/O APIC; uses the same mechanism as mapping a local APIC.
 */
uint32_t *
mapin_ioapic(uint32_t addr, size_t len, int flags)
{
	return (mapin_apic(addr, len, flags));
}
1575 
1576 /*
1577  * unmap an apic
1578  */
1579 void
1580 mapout_apic(caddr_t addr, size_t len)
1581 {
1582 	psm_unmap_phys(addr, len);
1583 }
1584 
void
mapout_ioapic(caddr_t addr, size_t len)
{
	/* I/O APIC mappings are plain apic mappings; release the same way. */
	mapout_apic(addr, len);
}
1590 
/*
 * Read an I/O APIC register: select it through the index register,
 * then read the data window.
 */
uint32_t
ioapic_read(int ioapic_ix, uint32_t reg)
{
	volatile uint32_t *ioapic;

	ioapic = apicioadr[ioapic_ix];
	ioapic[APIC_IO_REG] = reg;
	return (ioapic[APIC_IO_DATA]);
}
1600 
/*
 * Write an I/O APIC register: select it through the index register,
 * then store the value via the data window.
 */
void
ioapic_write(int ioapic_ix, uint32_t reg, uint32_t value)
{
	volatile uint32_t *ioapic;

	ioapic = apicioadr[ioapic_ix];
	ioapic[APIC_IO_REG] = reg;
	ioapic[APIC_IO_DATA] = value;
}
1610 
/*
 * Write a value to the I/O APIC EOI register.
 */
void
ioapic_write_eoi(int ioapic_ix, uint32_t value)
{
	volatile uint32_t *ioapic;

	ioapic = apicioadr[ioapic_ix];
	ioapic[APIC_IO_EOI] = value;
}
1619 
1620 /*
1621  * Round-robin algorithm to find the next CPU with interrupts enabled.
1622  * It can't share the same static variable apic_next_bind_cpu with
1623  * apic_get_next_bind_cpu(), since that will cause all interrupts to be
1624  * bound to CPU1 at boot time.  During boot, only CPU0 is online with
1625  * interrupts enabled when apic_get_next_bind_cpu() and apic_find_cpu()
1626  * are called.  However, the pcplusmp driver assumes that there will be
1627  * boot_ncpus CPUs configured eventually so it tries to distribute all
1628  * interrupts among CPU0 - CPU[boot_ncpus - 1].  Thus to prevent all
1629  * interrupts being targetted at CPU1, we need to use a dedicated static
1630  * variable for find_next_cpu() instead of sharing apic_next_bind_cpu.
1631  */
1632 
processorid_t
apic_find_cpu(int flag)
{
	int i;
	static processorid_t acid = 0;	/* round-robin scan position */

	/* Find the first CPU with the passed-in flag set */
	for (i = 0; i < apic_nproc; i++) {
		/* Advance first, so consecutive calls rotate across CPUs. */
		if (++acid >= apic_nproc) {
			acid = 0;
		}
		if (apic_cpu_in_range(acid) &&
		    (apic_cpus[acid].aci_status & flag)) {
			break;
		}
	}

	/* Some CPU is expected to match; DEBUG builds verify that. */
	ASSERT((apic_cpus[acid].aci_status & flag) != 0);
	return (acid);
}
1653 
1654 /*
1655  * Switch between safe and x2APIC IPI sending method.
1656  * CPU may power on in xapic mode or x2apic mode. If CPU needs to send IPI to
1657  * other CPUs before entering x2APIC mode, it still needs to xAPIC method.
1658  * Before sending StartIPI to target CPU, psm_send_ipi will be changed to
1659  * apic_common_send_ipi, which detects current local APIC mode and use right
1660  * method to send IPI. If some CPUs fail to start up, apic_poweron_cnt
1661  * won't return to zero, so apic_common_send_ipi will always be used.
1662  * psm_send_ipi can't be simply changed back to x2apic_send_ipi if some CPUs
1663  * failed to start up because those failed CPUs may recover itself later at
1664  * unpredictable time.
1665  */
void
apic_switch_ipi_callback(boolean_t enter)
{
	ulong_t iflag;
	struct psm_ops *pops = psmops;

	/* Serialize mode switches with interrupts off. */
	iflag = intr_clear();
	lock_set(&apic_mode_switch_lock);
	if (enter) {
		ASSERT(apic_poweron_cnt >= 0);
		/* First CPU entering power-on: switch to the safe IPI path. */
		if (apic_poweron_cnt == 0) {
			pops->psm_send_ipi = apic_common_send_ipi;
			send_dirintf = pops->psm_send_ipi;
		}
		apic_poweron_cnt++;
	} else {
		ASSERT(apic_poweron_cnt > 0);
		apic_poweron_cnt--;
		/* Last CPU done powering on: resume direct x2APIC IPIs. */
		if (apic_poweron_cnt == 0) {
			pops->psm_send_ipi = x2apic_send_ipi;
			send_dirintf = pops->psm_send_ipi;
		}
	}
	lock_clear(&apic_mode_switch_lock);
	intr_restore(iflag);
}
1692 
void
apic_intrmap_init(int apic_mode)
{
	int suppress_brdcst_eoi = 0;

	/* Nothing to do unless an interrupt-remapping provider registered. */
	if (psm_vt_ops != NULL) {
		/*
		 * Since X2APIC requires the use of interrupt remapping
		 * (though this is not documented explicitly in the Intel
		 * documentation (yet)), initialize interrupt remapping
		 * support before initializing the X2APIC unit.
		 */
		if (((apic_intrmap_ops_t *)psm_vt_ops)->
		    apic_intrmap_init(apic_mode) == DDI_SUCCESS) {

			apic_vt_ops = psm_vt_ops;

			/*
			 * We leverage the interrupt remapping engine to
			 * suppress broadcast EOI; thus we must send the
			 * directed EOI with the directed-EOI handler.
			 */
			if (apic_directed_EOI_supported() == 0) {
				suppress_brdcst_eoi = 1;
			}

			apic_vt_ops->apic_intrmap_enable(suppress_brdcst_eoi);

			if (apic_detect_x2apic()) {
				apic_enable_x2apic();
			}

			/* Install the directed-EOI handler if needed. */
			if (apic_directed_EOI_supported() == 0) {
				apic_set_directed_EOI_handler();
			}
		}
	}
}
1731 
/*ARGSUSED*/
static void
apic_record_ioapic_rdt(void *intrmap_private, ioapic_rdt_t *irdt)
{
	/* Shift the destination field into position in the RDT high word. */
	irdt->ir_hi <<= APIC_ID_BIT_OFFSET;
}
1738 
/*ARGSUSED*/
static void
apic_record_msi(void *intrmap_private, msi_regs_t *mregs)
{
	/*
	 * Build the MSI address from the standard header, fixed
	 * redirection hint, physical destination mode and the existing
	 * destination value; mark the data as edge-triggered.
	 */
	mregs->mr_addr = MSI_ADDR_HDR |
	    (MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
	    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT) |
	    (mregs->mr_addr << MSI_ADDR_DEST_SHIFT);
	mregs->mr_data = (MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) |
	    mregs->mr_data;
}
1750 
1751 /*
1752  * Functions from apic_introp.c
1753  *
1754  * Those functions are used by apic_intr_ops().
1755  */
1756 
1757 /*
1758  * MSI support flag:
1759  * reflects whether MSI is supported at APIC level
1760  * it can also be patched through /etc/system
1761  *
1762  *  0 = default value - don't know and need to call apic_check_msi_support()
1763  *      to find out then set it accordingly
1764  *  1 = supported
1765  * -1 = not supported
1766  */
1767 int	apic_support_msi = 0;
1768 
1769 /* Multiple vector support for MSI-X */
1770 int	apic_msix_enable = 1;
1771 
1772 /* Multiple vector support for MSI */
1773 int	apic_multi_msi_enable = 1;
1774 
1775 /*
1776  * check whether the system supports MSI
1777  *
1778  * If PCI-E capability is found, then this must be a PCI-E system.
1779  * Since MSI is required for PCI-E system, it returns PSM_SUCCESS
1780  * to indicate this system supports MSI.
1781  */
1782 int
1783 apic_check_msi_support()
1784 {
1785 	dev_info_t *cdip;
1786 	char dev_type[16];
1787 	int dev_len;
1788 
1789 	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support:\n"));
1790 
1791 	/*
1792 	 * check whether the first level children of root_node have
1793 	 * PCI-E capability
1794 	 */
1795 	for (cdip = ddi_get_child(ddi_root_node()); cdip != NULL;
1796 	    cdip = ddi_get_next_sibling(cdip)) {
1797 
1798 		DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: cdip: 0x%p,"
1799 		    " driver: %s, binding: %s, nodename: %s\n", (void *)cdip,
1800 		    ddi_driver_name(cdip), ddi_binding_name(cdip),
1801 		    ddi_node_name(cdip)));
1802 		dev_len = sizeof (dev_type);
1803 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
1804 		    "device_type", (caddr_t)dev_type, &dev_len)
1805 		    != DDI_PROP_SUCCESS)
1806 			continue;
1807 		if (strcmp(dev_type, "pciex") == 0)
1808 			return (PSM_SUCCESS);
1809 	}
1810 
1811 	/* MSI is not supported on this system */
1812 	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: no 'pciex' "
1813 	    "device_type found\n"));
1814 	return (PSM_FAILURE);
1815 }
1816 
1817 /*
1818  * apic_pci_msi_unconfigure:
1819  *
1820  * This and next two interfaces are copied from pci_intr_lib.c
1821  * Do ensure that these two files stay in sync.
1822  * These needed to be copied over here to avoid a deadlock situation on
1823  * certain mp systems that use MSI interrupts.
1824  *
1825  * IMPORTANT regards next three interfaces:
1826  * i) are called only for MSI/X interrupts.
1827  * ii) called with interrupts disabled, and must not block
1828  */
void
apic_pci_msi_unconfigure(dev_info_t *rdip, int type, int inum)
{
	ushort_t		msi_ctrl;
	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(rdip);

	ASSERT((handle != NULL) && (cap_ptr != 0));

	if (type == DDI_INTR_TYPE_MSI) {
		/* Clear the multiple-message-enable field and the address. */
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		msi_ctrl &= (~PCI_MSI_MME_MASK);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
		pci_config_put32(handle, cap_ptr + PCI_MSI_ADDR_OFFSET, 0);

		/* The data register location depends on 64-bit support. */
		if (msi_ctrl &  PCI_MSI_64BIT_MASK) {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, 0);
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, 0);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, 0);
		}

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		uint32_t	mask;
		ddi_intr_msix_t	*msix_p = i_ddi_get_msix(rdip);

		ASSERT(msix_p != NULL);

		/* Offset into "inum"th entry in the MSI-X table & mask it */
		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;

		mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);

		/* Set the per-vector mask bit before clearing the entry. */
		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask | 1));

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum * PCI_MSIX_VECTOR_SIZE);

		/* Reset the "data" and "addr" bits */
		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), 0);
		ddi_put64(msix_p->msix_tbl_hdl, (uint64_t *)off, 0);
	}
}
1879 
1880 /*
1881  * apic_pci_msi_disable_mode:
1882  */
void
apic_pci_msi_disable_mode(dev_info_t *rdip, int type)
{
	ushort_t		msi_ctrl;
	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(rdip);

	ASSERT((handle != NULL) && (cap_ptr != 0));

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		/* Already disabled; nothing to do. */
		if (!(msi_ctrl & PCI_MSI_ENABLE_BIT))
			return;

		msi_ctrl &= ~PCI_MSI_ENABLE_BIT;	/* MSI disable */
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

	} else if (type == DDI_INTR_TYPE_MSIX) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);
		/* Only write back when the enable bit is actually set. */
		if (msi_ctrl & PCI_MSIX_ENABLE_BIT) {
			msi_ctrl &= ~PCI_MSIX_ENABLE_BIT;
			pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
			    msi_ctrl);
		}
	}
}
1909 
1910 uint32_t
1911 apic_get_localapicid(uint32_t cpuid)
1912 {
1913 	ASSERT(cpuid < apic_nproc && apic_cpus != NULL);
1914 
1915 	return (apic_cpus[cpuid].aci_local_id);
1916 }
1917 
/*
 * Return the recorded hardware ID of the given I/O APIC.
 */
uchar_t
apic_get_ioapicid(uchar_t ioapicindex)
{
	ASSERT(ioapicindex < MAX_IO_APIC);

	return (apic_io_id[ioapicindex]);
}
1925