1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 
22 /*
23  * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
24  */
25 /*
26  * Copyright (c) 2010, Intel Corporation.
27  * All rights reserved.
28  */
29 /*
30  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
31  * Copyright 2013 Pluribus Networks, Inc.
32  * Copyright 2019 Joyent, Inc.
33  */
34 
35 #include <sys/processor.h>
36 #include <sys/time.h>
37 #include <sys/psm.h>
38 #include <sys/smp_impldefs.h>
39 #include <sys/cram.h>
40 #include <sys/acpi/acpi.h>
41 #include <sys/acpica.h>
42 #include <sys/psm_common.h>
43 #include <sys/pit.h>
44 #include <sys/ddi.h>
45 #include <sys/sunddi.h>
46 #include <sys/ddi_impldefs.h>
47 #include <sys/pci.h>
48 #include <sys/promif.h>
49 #include <sys/x86_archext.h>
50 #include <sys/cpc_impl.h>
51 #include <sys/uadmin.h>
52 #include <sys/panic.h>
53 #include <sys/debug.h>
54 #include <sys/archsystm.h>
55 #include <sys/trap.h>
56 #include <sys/machsystm.h>
57 #include <sys/sysmacros.h>
58 #include <sys/cpuvar.h>
59 #include <sys/rm_platter.h>
60 #include <sys/privregs.h>
61 #include <sys/note.h>
62 #include <sys/pci_intr_lib.h>
63 #include <sys/spl.h>
64 #include <sys/clock.h>
65 #include <sys/dditypes.h>
66 #include <sys/sunddi.h>
67 #include <sys/x_call.h>
68 #include <sys/reboot.h>
69 #include <sys/apix.h>
70 #include <sys/smt.h>
71 
72 static int apix_get_avail_vector_oncpu(uint32_t, int, int);
73 static apix_vector_t *apix_init_vector(processorid_t, uchar_t);
74 static void apix_cleanup_vector(apix_vector_t *);
75 static void apix_insert_av(apix_vector_t *, void *, avfunc, caddr_t, caddr_t,
76     uint64_t *, int, dev_info_t *);
77 static void apix_remove_av(apix_vector_t *, struct autovec *);
78 static void apix_clear_dev_map(dev_info_t *, int, int);
79 static boolean_t apix_is_cpu_enabled(processorid_t);
80 static void apix_wait_till_seen(processorid_t, int);
81 
82 #define	GET_INTR_INUM(ihdlp)		\
83 	(((ihdlp) != NULL) ? ((ddi_intr_handle_impl_t *)(ihdlp))->ih_inum : 0)
84 
85 apix_rebind_info_t apix_rebindinfo = {0, 0, 0, NULL, 0, NULL};
86 
87 /*
88  * Allocate IPI
89  *
90  * Return vector number or 0 on error
91  */
92 uchar_t
93 apix_alloc_ipi(int ipl)
94 {
95 	apix_vector_t *vecp;
96 	uchar_t vector;
97 	int cpun;
98 	int nproc;
99 
100 	APIX_ENTER_CPU_LOCK(0);
101 
102 	vector = apix_get_avail_vector_oncpu(0, APIX_IPI_MIN, APIX_IPI_MAX);
103 	if (vector == 0) {
104 		APIX_LEAVE_CPU_LOCK(0);
105 		cmn_err(CE_WARN, "apix: no available IPI\n");
106 		apic_error |= APIC_ERR_GET_IPIVECT_FAIL;
107 		return (0);
108 	}
109 
110 	nproc = max(apic_nproc, apic_max_nproc);
111 	for (cpun = 0; cpun < nproc; cpun++) {
112 		vecp = xv_vector(cpun, vector);
113 		if (vecp == NULL) {
114 			vecp = kmem_zalloc(sizeof (apix_vector_t), KM_NOSLEEP);
115 			if (vecp == NULL) {
116 				cmn_err(CE_WARN, "apix: No memory for ipi");
117 				goto fail;
118 			}
119 			xv_vector(cpun, vector) = vecp;
120 		}
121 		vecp->v_state = APIX_STATE_ALLOCED;
122 		vecp->v_type = APIX_TYPE_IPI;
123 		vecp->v_cpuid = vecp->v_bound_cpuid = cpun;
124 		vecp->v_vector = vector;
125 		vecp->v_pri = ipl;
126 	}
127 	APIX_LEAVE_CPU_LOCK(0);
128 	return (vector);
129 
130 fail:
131 	while (--cpun >= 0)
132 		apix_cleanup_vector(xv_vector(cpun, vector));
133 	APIX_LEAVE_CPU_LOCK(0);
134 	return (0);
135 }
136 
137 /*
138  * Add IPI service routine
139  */
140 static int
141 apix_add_ipi(int ipl, avfunc xxintr, char *name, int vector,
142     caddr_t arg1, caddr_t arg2)
143 {
144 	int cpun;
145 	apix_vector_t *vecp;
146 	int nproc;
147 
148 	ASSERT(vector >= APIX_IPI_MIN && vector <= APIX_IPI_MAX);
149 
150 	nproc = max(apic_nproc, apic_max_nproc);
151 	for (cpun = 0; cpun < nproc; cpun++) {
152 		APIX_ENTER_CPU_LOCK(cpun);
153 		vecp = xv_vector(cpun, vector);
154 		apix_insert_av(vecp, NULL, xxintr, arg1, arg2, NULL, ipl, NULL);
155 		vecp->v_state = APIX_STATE_ENABLED;
156 		APIX_LEAVE_CPU_LOCK(cpun);
157 	}
158 
159 	APIC_VERBOSE(IPI, (CE_CONT, "apix: add ipi for %s, vector %x "
160 	    "ipl %x\n", name, vector, ipl));
161 
162 	return (1);
163 }
164 
165 /*
166  * Find and return the first free vector in the range [start, end]
167  */
168 static int
169 apix_get_avail_vector_oncpu(uint32_t cpuid, int start, int end)
170 {
171 	int i;
172 	apix_impl_t *apixp = apixs[cpuid];
173 
174 	for (i = start; i <= end; i++) {
175 		if (APIC_CHECK_RESERVE_VECTORS(i))
176 			continue;
177 		if (IS_VECT_FREE(apixp->x_vectbl[i]))
178 			return (i);
179 	}
180 
181 	return (0);
182 }
183 
184 /*
185  * Allocate a vector on specified cpu
186  *
187  * Return NULL on error
188  */
189 static apix_vector_t *
190 apix_alloc_vector_oncpu(uint32_t cpuid, dev_info_t *dip, int inum, int type)
191 {
192 	processorid_t tocpu = cpuid & ~IRQ_USER_BOUND;
193 	apix_vector_t *vecp;
194 	int vector;
195 
196 	ASSERT(APIX_CPU_LOCK_HELD(tocpu));
197 
198 	/* find free vector */
199 	vector = apix_get_avail_vector_oncpu(tocpu, APIX_AVINTR_MIN,
200 	    APIX_AVINTR_MAX);
201 	if (vector == 0)
202 		return (NULL);
203 
204 	vecp = apix_init_vector(tocpu, vector);
205 	vecp->v_type = (ushort_t)type;
206 	vecp->v_inum = inum;
207 	vecp->v_flags = (cpuid & IRQ_USER_BOUND) ? APIX_VECT_USER_BOUND : 0;
208 
209 	if (dip != NULL)
210 		apix_set_dev_map(vecp, dip, inum);
211 
212 	return (vecp);
213 }
214 
215 /*
216  * Allocates "count" contiguous MSI vectors starting at the proper alignment.
217  * The caller must ensure that count is a power of 2 and is not
218  * less than 1.
219  *
220  * Return a pointer to the first allocated vector, or NULL on failure
221  */
222 apix_vector_t *
223 apix_alloc_nvectors_oncpu(uint32_t cpuid, dev_info_t *dip, int inum,
224     int count, int type)
225 {
226 	int i, msibits, start = 0, navail = 0;
227 	apix_vector_t *vecp, *startp = NULL;
228 	processorid_t tocpu = cpuid & ~IRQ_USER_BOUND;
229 	uint_t flags;
230 
231 	ASSERT(APIX_CPU_LOCK_HELD(tocpu));
232 
233 	/*
234 	 * msibits is the no. of lower order message data bits for the
235 	 * allocated MSI vectors and is used to calculate the aligned
236 	 * starting vector
237 	 */
238 	msibits = count - 1;
239 
240 	/* It has to be contiguous */
241 	for (i = APIX_AVINTR_MIN; i <= APIX_AVINTR_MAX; i++) {
242 		if (!IS_VECT_FREE(xv_vector(tocpu, i)))
243 			continue;
244 
245 		/*
246 		 * starting vector has to be aligned accordingly for
247 		 * multiple MSIs
248 		 */
249 		if (msibits)
250 			i = (i + msibits) & ~msibits;
251 
252 		for (navail = 0, start = i; i <= APIX_AVINTR_MAX; i++) {
253 			if (!IS_VECT_FREE(xv_vector(tocpu, i)))
254 				break;
255 			if (APIC_CHECK_RESERVE_VECTORS(i))
256 				break;
257 			if (++navail == count)
258 				goto done;
259 		}
260 	}
261 
262 	return (NULL);
263 
264 done:
265 	flags = (cpuid & IRQ_USER_BOUND) ? APIX_VECT_USER_BOUND : 0;
266 
267 	for (i = 0; i < count; i++) {
268 		if ((vecp = apix_init_vector(tocpu, start + i)) == NULL)
269 			goto fail;
270 
271 		vecp->v_type = (ushort_t)type;
272 		vecp->v_inum = inum + i;
273 		vecp->v_flags = flags;
274 
275 		if (dip != NULL)
276 			apix_set_dev_map(vecp, dip, inum + i);
277 
278 		if (i == 0)
279 			startp = vecp;
280 	}
281 
282 	return (startp);
283 
284 fail:
285 	while (i-- > 0) {	/* Free allocated vectors */
286 		vecp = xv_vector(tocpu, start + i);
287 		apix_clear_dev_map(dip, inum + i, type);
288 		apix_cleanup_vector(vecp);
289 	}
290 	return (NULL);
291 }
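/*
 * A minimal standalone sketch (not part of the driver) of the alignment
 * arithmetic above: for a power-of-2 count, (i + msibits) & ~msibits rounds
 * a candidate vector up to the next multiple of count. Multi-MSI requires
 * this alignment so the low log2(count) bits of the message data can select
 * the individual vector within the block.
 */
static int
round_up_to_count(int i, int count)
{
	int msibits = count - 1;	/* assumes count is a power of 2 */

	return ((i + msibits) & ~msibits);
}
/* e.g. round_up_to_count(0x33, 4) == 0x34, round_up_to_count(0x34, 4) == 0x34 */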
292 
293 #define	APIX_WRITE_MSI_DATA(_hdl, _cap, _ctrl, _v)\
294 do {\
295 	if ((_ctrl) & PCI_MSI_64BIT_MASK)\
296 		pci_config_put16((_hdl), (_cap) + PCI_MSI_64BIT_DATA, (_v));\
297 	else\
298 		pci_config_put16((_hdl), (_cap) + PCI_MSI_32BIT_DATA, (_v));\
299 _NOTE(CONSTCOND)} while (0)
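/*
 * Context for the macro above, from the PCI MSI capability layout: a
 * function that advertises 64-bit addressing (PCI_MSI_64BIT_MASK set in the
 * control word) carries an extra upper-address dword, which pushes the
 * 16-bit data register from offset 0x8 to offset 0xc:
 *
 *	32-bit:	+0x0 ctrl/ID	+0x4 addr	+0x8 data
 *	64-bit:	+0x0 ctrl/ID	+0x4 addr lo	+0x8 addr hi	+0xc data
 */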
300 
301 static void
302 apix_pci_msi_enable_vector(apix_vector_t *vecp, dev_info_t *dip, int type,
303     int inum, int count, uchar_t vector, int target_apic_id)
304 {
305 	uint64_t		msi_addr, msi_data;
306 	ushort_t		msi_ctrl;
307 	int			i, cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
308 	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(dip);
309 	msi_regs_t		msi_regs;
310 	void			*intrmap_tbl[PCI_MSI_MAX_INTRS];
311 
312 	DDI_INTR_IMPLDBG((CE_CONT, "apix_pci_msi_enable_vector: dip=0x%p\n"
313 	    "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
314 	    ddi_driver_name(dip), inum, vector, target_apic_id));
315 
316 	ASSERT((handle != NULL) && (cap_ptr != 0));
317 
318 	msi_regs.mr_data = vector;
319 	msi_regs.mr_addr = target_apic_id;
320 
321 	for (i = 0; i < count; i++)
322 		intrmap_tbl[i] = xv_intrmap_private(vecp->v_cpuid, vector + i);
323 	apic_vt_ops->apic_intrmap_alloc_entry(intrmap_tbl, dip, type,
324 	    count, 0xff);
325 	for (i = 0; i < count; i++)
326 		xv_intrmap_private(vecp->v_cpuid, vector + i) = intrmap_tbl[i];
327 
328 	apic_vt_ops->apic_intrmap_map_entry(vecp->v_intrmap_private,
329 	    (void *)&msi_regs, type, count);
330 	apic_vt_ops->apic_intrmap_record_msi(vecp->v_intrmap_private,
331 	    &msi_regs);
332 
333 	/* MSI Address */
334 	msi_addr = msi_regs.mr_addr;
335 
336 	/* MSI Data: MSI is edge triggered according to spec */
337 	msi_data = msi_regs.mr_data;
338 
339 	DDI_INTR_IMPLDBG((CE_CONT, "apix_pci_msi_enable_vector: addr=0x%lx "
340 	    "data=0x%lx\n", (long)msi_addr, (long)msi_data));
341 
342 	if (type == APIX_TYPE_MSI) {
343 		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
344 
345 		/* Set the bits to inform how many MSIs are enabled */
346 		msi_ctrl |= ((highbit(count) - 1) << PCI_MSI_MME_SHIFT);
347 		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
348 
349 		if ((vecp->v_flags & APIX_VECT_MASKABLE) == 0)
350 			APIX_WRITE_MSI_DATA(handle, cap_ptr, msi_ctrl,
351 			    APIX_RESV_VECTOR);
352 
353 		pci_config_put32(handle,
354 		    cap_ptr + PCI_MSI_ADDR_OFFSET, msi_addr);
355 		if (msi_ctrl & PCI_MSI_64BIT_MASK)
356 			pci_config_put32(handle,
357 			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, msi_addr >> 32);
358 
359 		APIX_WRITE_MSI_DATA(handle, cap_ptr, msi_ctrl, msi_data);
360 	} else if (type == APIX_TYPE_MSIX) {
361 		uintptr_t	off;
362 		ddi_intr_msix_t	*msix_p = i_ddi_get_msix(dip);
363 
364 		/* Offset into the "inum"th entry in the MSI-X table */
365 		off = (uintptr_t)msix_p->msix_tbl_addr +
366 		    (inum * PCI_MSIX_VECTOR_SIZE);
367 
368 		ddi_put32(msix_p->msix_tbl_hdl,
369 		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), msi_data);
370 		ddi_put32(msix_p->msix_tbl_hdl,
371 		    (uint32_t *)(off + PCI_MSIX_LOWER_ADDR_OFFSET), msi_addr);
372 		ddi_put32(msix_p->msix_tbl_hdl,
373 		    (uint32_t *)(off + PCI_MSIX_UPPER_ADDR_OFFSET),
374 		    msi_addr >> 32);
375 	}
376 }
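/*
 * For reference, the MSI-X table writes above follow the 16-byte per-entry
 * layout mandated by the PCI spec (PCI_MSIX_VECTOR_SIZE). A sketch of that
 * layout as a struct; the driver itself uses the PCI_MSIX_*_OFFSET constants
 * instead:
 */
typedef struct msix_table_entry_sketch {
	uint32_t mte_addr_lo;	/* PCI_MSIX_LOWER_ADDR_OFFSET (0x0) */
	uint32_t mte_addr_hi;	/* PCI_MSIX_UPPER_ADDR_OFFSET (0x4) */
	uint32_t mte_data;	/* PCI_MSIX_DATA_OFFSET (0x8) */
	uint32_t mte_vec_ctrl;	/* vector control (0xc); bit 0 masks the entry */
} msix_table_entry_sketch_t;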
377 
378 static void
379 apix_pci_msi_enable_mode(dev_info_t *dip, int type, int inum)
380 {
381 	ushort_t		msi_ctrl;
382 	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
383 	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(dip);
384 
385 	ASSERT((handle != NULL) && (cap_ptr != 0));
386 
387 	if (type == APIX_TYPE_MSI) {
388 		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
389 		if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
390 			return;
391 
392 		msi_ctrl |= PCI_MSI_ENABLE_BIT;
393 		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
394 
395 	} else if (type == DDI_INTR_TYPE_MSIX) {
396 		uintptr_t	off;
397 		uint32_t	mask;
398 		ddi_intr_msix_t	*msix_p;
399 
400 		msix_p = i_ddi_get_msix(dip);
401 
402 		/* Offset into "inum"th entry in the MSI-X table & clear mask */
403 		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
404 		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
405 
406 		mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);
407 
408 		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask & ~1));
409 
410 		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);
411 
412 		if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT)) {
413 			msi_ctrl |= PCI_MSIX_ENABLE_BIT;
414 			pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
415 			    msi_ctrl);
416 		}
417 	}
418 }
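/*
 * A sketch of the MME encoding used in apix_pci_msi_enable_vector() above:
 * the Multiple Message Enable field of the MSI control word holds
 * log2(count), so (highbit(count) - 1) maps count 1 -> 0, 2 -> 1, 4 -> 2,
 * ..., 32 -> 5 (highbit() returns the 1-based index of the highest set bit).
 */
static ushort_t
msi_mme_bits(int count)		/* count must be a power of 2 */
{
	return ((highbit(count) - 1) << PCI_MSI_MME_SHIFT);
}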
419 
420 /*
421  * Set up the interrupt, programming the IO-APIC RDT entry or MSI/X address/data.
422  */
423 void
424 apix_enable_vector(apix_vector_t *vecp)
425 {
426 	int tocpu = vecp->v_cpuid, type = vecp->v_type;
427 	apic_cpus_info_t *cpu_infop;
428 	ulong_t iflag;
429 
430 	ASSERT(tocpu < apic_nproc);
431 
432 	cpu_infop = &apic_cpus[tocpu];
433 	if (vecp->v_flags & APIX_VECT_USER_BOUND)
434 		cpu_infop->aci_bound++;
435 	else
436 		cpu_infop->aci_temp_bound++;
437 
438 	iflag = intr_clear();
439 	lock_set(&apic_ioapic_lock);
440 
441 	if (!DDI_INTR_IS_MSI_OR_MSIX(type)) {	/* fixed */
442 		apix_intx_enable(vecp->v_inum);
443 	} else {
444 		int inum = vecp->v_inum;
445 		dev_info_t *dip = APIX_GET_DIP(vecp);
446 		int count = i_ddi_intr_get_current_nintrs(dip);
447 
448 		if (type == APIX_TYPE_MSI) {	/* MSI */
449 			if (inum == apix_get_max_dev_inum(dip, type)) {
450 				/* last one */
451 				uchar_t start_inum = inum + 1 - count;
452 				uchar_t start_vect = vecp->v_vector + 1 - count;
453 				apix_vector_t *start_vecp =
454 				    xv_vector(vecp->v_cpuid, start_vect);
455 
456 				APIC_VERBOSE(INTR, (CE_CONT, "apix: call "
457 				    "apix_pci_msi_enable_vector\n"));
458 				apix_pci_msi_enable_vector(start_vecp, dip,
459 				    type, start_inum, count, start_vect,
460 				    cpu_infop->aci_local_id);
461 
462 				APIC_VERBOSE(INTR, (CE_CONT, "apix: call "
463 				    "apix_pci_msi_enable_mode\n"));
464 				apix_pci_msi_enable_mode(dip, type, inum);
465 			}
466 		} else {				/* MSI-X */
467 			apix_pci_msi_enable_vector(vecp, dip,
468 			    type, inum, 1, vecp->v_vector,
469 			    cpu_infop->aci_local_id);
470 			apix_pci_msi_enable_mode(dip, type, inum);
471 		}
472 	}
473 	vecp->v_state = APIX_STATE_ENABLED;
474 	apic_redist_cpu_skip &= ~(1 << tocpu);
475 
476 	lock_clear(&apic_ioapic_lock);
477 	intr_restore(iflag);
478 }
479 
480 /*
481  * Disable the interrupt
482  */
483 void
484 apix_disable_vector(apix_vector_t *vecp)
485 {
486 	struct autovec *avp = vecp->v_autovect;
487 	ulong_t iflag;
488 
489 	ASSERT(avp != NULL);
490 
491 	iflag = intr_clear();
492 	lock_set(&apic_ioapic_lock);
493 
494 	switch (vecp->v_type) {
495 	case APIX_TYPE_MSI:
496 		ASSERT(avp->av_vector != NULL && avp->av_dip != NULL);
497 		/*
498 		 * Disable the MSI vector. For multi-MSI, make sure the
499 		 * mode is only disabled when the last vector of the
500 		 * group is being torn down.
501 		 */
502 		if (i_ddi_intr_get_current_nenables(avp->av_dip) == 1) {
503 			apic_pci_msi_disable_mode(avp->av_dip,
504 			    DDI_INTR_TYPE_MSI);
505 		}
506 		break;
507 	case APIX_TYPE_MSIX:
508 		ASSERT(avp->av_vector != NULL && avp->av_dip != NULL);
509 		/*
510 		 * Disable the MSI-X vector: clear the mask and the
511 		 * address/data pair of its MSI-X table entry.
512 		 */
513 		apic_pci_msi_unconfigure(avp->av_dip, DDI_INTR_TYPE_MSIX,
514 		    vecp->v_inum);
515 		/*
516 		 * Make sure we only disable on the last MSI-X
517 		 */
518 		if (i_ddi_intr_get_current_nenables(avp->av_dip) == 1) {
519 			apic_pci_msi_disable_mode(avp->av_dip,
520 			    DDI_INTR_TYPE_MSIX);
521 		}
522 		break;
523 	default:
524 		apix_intx_disable(vecp->v_inum);
525 		break;
526 	}
527 
528 	if (!(apic_cpus[vecp->v_cpuid].aci_status & APIC_CPU_SUSPEND))
529 		vecp->v_state = APIX_STATE_DISABLED;
530 	apic_vt_ops->apic_intrmap_free_entry(&vecp->v_intrmap_private);
531 	vecp->v_intrmap_private = NULL;
532 
533 	lock_clear(&apic_ioapic_lock);
534 	intr_restore(iflag);
535 }
536 
537 /*
538  * Mark vector as obsoleted or freed. The vector is marked
539  * obsoleted if there are pending requests on it. Otherwise,
540  * free the vector. The obsoleted vectors get freed after
541  * being serviced.
542  *
543  * Return 1 if the vector was obsoleted and 0 if it was freed.
544  */
545 #define	INTR_BUSY(_avp)\
546 	((((volatile ushort_t)(_avp)->av_flags) &\
547 	(AV_PENTRY_PEND | AV_PENTRY_ONPROC)) != 0)
548 #define	LOCAL_WITH_INTR_DISABLED(_cpuid)\
549 	((_cpuid) == psm_get_cpu_id() && !interrupts_enabled())
550 static uint64_t dummy_tick;
551 
552 int
553 apix_obsolete_vector(apix_vector_t *vecp)
554 {
555 	struct autovec *avp = vecp->v_autovect;
556 	int repeats, tries, ipl, busy = 0, cpuid = vecp->v_cpuid;
557 	apix_impl_t *apixp = apixs[cpuid];
558 
559 	ASSERT(APIX_CPU_LOCK_HELD(cpuid));
560 
561 	for (avp = vecp->v_autovect; avp != NULL; avp = avp->av_link) {
562 		if (avp->av_vector == NULL)
563 			continue;
564 
565 		if (LOCAL_WITH_INTR_DISABLED(cpuid)) {
566 			int bit, index, irr;
567 
568 			if (INTR_BUSY(avp)) {
569 				busy++;
570 				continue;
571 			}
572 
573 			/* check IRR for pending interrupts */
574 			index = vecp->v_vector / 32;
575 			bit = vecp->v_vector % 32;
576 			irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);
577 			if ((irr & (1 << bit)) != 0)
578 				busy++;
579 
580 			if (!busy)
581 				apix_remove_av(vecp, avp);
582 
583 			continue;
584 		}
585 
586 		repeats = 0;
587 		do {
588 			repeats++;
589 			for (tries = 0; tries < apic_max_reps_clear_pending;
590 			    tries++)
591 				if (!INTR_BUSY(avp))
592 					break;
593 		} while (INTR_BUSY(avp) &&
594 		    (repeats < apic_max_reps_clear_pending));
595 
596 		if (INTR_BUSY(avp))
597 			busy++;
598 		else {
599 			/*
600 			 * Interrupt is not in pending list or being serviced.
601 			 * However it might be cached in Local APIC's IRR
602 			 * register. It's impossible to check another CPU's
603 			 * IRR register. Then wait till lower levels finish
604 			 * running.
605 			 */
606 			for (ipl = 1; ipl < MIN(LOCK_LEVEL, vecp->v_pri); ipl++)
607 				apix_wait_till_seen(cpuid, ipl);
608 			if (INTR_BUSY(avp))
609 				busy++;
610 		}
611 
612 		if (!busy)
613 			apix_remove_av(vecp, avp);
614 	}
615 
616 	if (busy) {
617 		apix_vector_t *tp = apixp->x_obsoletes;
618 
619 		if (vecp->v_state == APIX_STATE_OBSOLETED)
620 			return (1);
621 
622 		vecp->v_state = APIX_STATE_OBSOLETED;
623 		vecp->v_next = NULL;
624 		if (tp == NULL)
625 			apixp->x_obsoletes = vecp;
626 		else {
627 			while (tp->v_next != NULL)
628 				tp = tp->v_next;
629 			tp->v_next = vecp;
630 		}
631 		return (1);
632 	}
633 
634 	/* interrupt is not busy */
635 	if (vecp->v_state == APIX_STATE_OBSOLETED) {
636 		/* remove from obsoleted list */
637 		apixp->x_obsoletes = vecp->v_next;
638 		vecp->v_next = NULL;
639 	}
640 	apix_cleanup_vector(vecp);
641 	return (0);
642 }
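/*
 * A sketch of the IRR probe open-coded above: the Local APIC IRR is a
 * 256-bit bitmap spread across eight 32-bit registers, one bit per vector,
 * so vector / 32 selects the register and vector % 32 selects the bit.
 * irr_pending() is a hypothetical helper, not part of this file.
 */
static boolean_t
irr_pending(uchar_t vector)
{
	int index = vector / 32;
	int bit = vector % 32;
	uint32_t irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);

	return (((irr & (1U << bit)) != 0) ? B_TRUE : B_FALSE);
}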
643 
644 /*
645  * Duplicate a number of contiguous vectors to the specified target vectors.
646  */
647 static void
648 apix_dup_vectors(apix_vector_t *oldp, apix_vector_t *newp, int count)
649 {
650 	struct autovec *avp;
651 	apix_vector_t *fromp, *top;
652 	processorid_t oldcpu = oldp->v_cpuid, newcpu = newp->v_cpuid;
653 	uchar_t oldvec = oldp->v_vector, newvec = newp->v_vector;
654 	int i, inum;
655 
656 	ASSERT(oldp->v_type != APIX_TYPE_IPI);
657 
658 	for (i = 0; i < count; i++) {
659 		fromp = xv_vector(oldcpu, oldvec + i);
660 		top = xv_vector(newcpu, newvec + i);
661 		ASSERT(fromp != NULL && top != NULL);
662 
663 		/* copy over original one */
664 		top->v_state = fromp->v_state;
665 		top->v_type = fromp->v_type;
666 		top->v_bound_cpuid = fromp->v_bound_cpuid;
667 		top->v_inum = fromp->v_inum;
668 		top->v_flags = fromp->v_flags;
669 		top->v_intrmap_private = fromp->v_intrmap_private;
670 
671 		for (avp = fromp->v_autovect; avp != NULL; avp = avp->av_link) {
672 			if (avp->av_vector == NULL)
673 				continue;
674 
675 			apix_insert_av(top, avp->av_intr_id, avp->av_vector,
676 			    avp->av_intarg1, avp->av_intarg2, avp->av_ticksp,
677 			    avp->av_prilevel, avp->av_dip);
678 
679 			if (fromp->v_type == APIX_TYPE_FIXED &&
680 			    avp->av_dip != NULL) {
681 				inum = GET_INTR_INUM(avp->av_intr_id);
682 				apix_set_dev_map(top, avp->av_dip, inum);
683 			}
684 		}
685 
686 		if (DDI_INTR_IS_MSI_OR_MSIX(fromp->v_type) &&
687 		    fromp->v_devp != NULL)
688 			apix_set_dev_map(top, fromp->v_devp->dv_dip,
689 			    fromp->v_devp->dv_inum);
690 	}
691 }
692 
693 static apix_vector_t *
694 apix_init_vector(processorid_t cpuid, uchar_t vector)
695 {
696 	apix_impl_t *apixp = apixs[cpuid];
697 	apix_vector_t *vecp = apixp->x_vectbl[vector];
698 
699 	ASSERT(IS_VECT_FREE(vecp));
700 
701 	if (vecp == NULL) {
702 		vecp = kmem_zalloc(sizeof (apix_vector_t), KM_NOSLEEP);
703 		if (vecp == NULL) {
704 			cmn_err(CE_WARN, "apix: no memory to allocate vector");
705 			return (NULL);
706 		}
707 		apixp->x_vectbl[vector] = vecp;
708 	}
709 	vecp->v_state = APIX_STATE_ALLOCED;
710 	vecp->v_cpuid = vecp->v_bound_cpuid = cpuid;
711 	vecp->v_vector = vector;
712 
713 	return (vecp);
714 }
715 
716 static void
717 apix_cleanup_vector(apix_vector_t *vecp)
718 {
719 	ASSERT(vecp->v_share == 0);
720 	vecp->v_bound_cpuid = IRQ_UNINIT;
721 	vecp->v_state = APIX_STATE_FREED;
722 	vecp->v_type = 0;
723 	vecp->v_flags = 0;
724 	vecp->v_busy = 0;
725 	vecp->v_intrmap_private = NULL;
726 }
727 
728 static void
729 apix_dprint_vector(apix_vector_t *vecp, dev_info_t *dip, int count)
730 {
731 #ifdef DEBUG
732 	major_t major;
733 	char *name, *drv_name;
734 	int instance, len, t_len;
735 	char mesg[1024] = "apix: ";
736 
737 	t_len = sizeof (mesg);
738 	len = strlen(mesg);
739 	if (dip != NULL) {
740 		name = ddi_get_name(dip);
741 		major = ddi_name_to_major(name);
742 		drv_name = ddi_major_to_name(major);
743 		instance = ddi_get_instance(dip);
744 		(void) snprintf(mesg + len, t_len - len, "%s (%s) instance %d ",
745 		    name, drv_name, instance);
746 	}
747 	len = strlen(mesg);
748 
749 	switch (vecp->v_type) {
750 	case APIX_TYPE_FIXED:
751 		(void) snprintf(mesg + len, t_len - len, "irqno %d",
752 		    vecp->v_inum);
753 		break;
754 	case APIX_TYPE_MSI:
755 		(void) snprintf(mesg + len, t_len - len,
756 		    "msi inum %d (count %d)", vecp->v_inum, count);
757 		break;
758 	case APIX_TYPE_MSIX:
759 		(void) snprintf(mesg + len, t_len - len, "msi-x inum %d",
760 		    vecp->v_inum);
761 		break;
762 	default:
763 		break;
764 
765 	}
766 
767 	APIC_VERBOSE(ALLOC, (CE_CONT, "%s allocated with vector 0x%x on "
768 	    "cpu %d\n", mesg, vecp->v_vector, vecp->v_cpuid));
769 #endif	/* DEBUG */
770 }
771 
772 /*
773  * Operations on avintr
774  */
775 
776 #define	INIT_AUTOVEC(p, intr_id, f, arg1, arg2, ticksp, ipl, dip)	\
777 do { \
778 	(p)->av_intr_id = intr_id;	\
779 	(p)->av_vector = f;		\
780 	(p)->av_intarg1 = arg1;		\
781 	(p)->av_intarg2 = arg2;		\
782 	(p)->av_ticksp = ticksp;	\
783 	(p)->av_prilevel = ipl;		\
784 	(p)->av_dip = dip;		\
785 	(p)->av_flags = 0;		\
786 _NOTE(CONSTCOND)} while (0)
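/*
 * Note: INIT_AUTOVEC, like APIX_WRITE_MSI_DATA above, is wrapped in
 * do { ... } while (0) so the multi-statement macro expands to a single
 * statement. A sketch of the pitfall the wrapper avoids:
 *
 *	#define	UNSAFE_INIT(p)	(p)->av_flags = 0; (p)->av_dip = NULL
 *
 *	if (cond)
 *		UNSAFE_INIT(p);	<- only the first assignment is conditional
 *	else			<- and this else no longer parses
 *
 * The do/while (0) form composes correctly with if/else and still requires
 * the usual trailing semicolon at the call site.
 */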
787 
788 /*
789  * Insert an interrupt service routine into the chain, ordered by
790  * priority from high to low
791  */
792 static void
793 apix_insert_av(apix_vector_t *vecp, void *intr_id, avfunc f, caddr_t arg1,
794     caddr_t arg2, uint64_t *ticksp, int ipl, dev_info_t *dip)
795 {
796 	struct autovec *p, *prep, *mem;
797 
798 	APIC_VERBOSE(INTR, (CE_CONT, "apix_insert_av: dip %p, vector 0x%x, "
799 	    "cpu %d\n", (void *)dip, vecp->v_vector, vecp->v_cpuid));
800 
801 	mem = kmem_zalloc(sizeof (struct autovec), KM_SLEEP);
802 	INIT_AUTOVEC(mem, intr_id, f, arg1, arg2, ticksp, ipl, dip);
803 	if (vecp->v_type == APIX_TYPE_FIXED && apic_level_intr[vecp->v_inum])
804 		mem->av_flags |= AV_PENTRY_LEVEL;
805 
806 	vecp->v_share++;
807 	vecp->v_pri = (ipl > vecp->v_pri) ? ipl : vecp->v_pri;
808 
809 	smt_intr_alloc_pil(vecp->v_pri);
810 
811 	if (vecp->v_autovect == NULL) {	/* Nothing on list - put it at head */
812 		vecp->v_autovect = mem;
813 		return;
814 	}
815 
816 	if (DDI_INTR_IS_MSI_OR_MSIX(vecp->v_type)) {	/* MSI/X */
817 		ASSERT(vecp->v_share == 1);	/* No sharing for MSI/X */
818 
819 		INIT_AUTOVEC(vecp->v_autovect, intr_id, f, arg1, arg2, ticksp,
820 		    ipl, dip);
821 		prep = vecp->v_autovect->av_link;
822 		vecp->v_autovect->av_link = NULL;
823 
824 		/* Free the following autovect chain */
825 		while (prep != NULL) {
826 			ASSERT(prep->av_vector == NULL);
827 
828 			p = prep;
829 			prep = prep->av_link;
830 			kmem_free(p, sizeof (struct autovec));
831 		}
832 
833 		kmem_free(mem, sizeof (struct autovec));
834 		return;
835 	}
836 
837 	/* find where it goes in list */
838 	prep = NULL;
839 	for (p = vecp->v_autovect; p != NULL; p = p->av_link) {
840 		if (p->av_vector && p->av_prilevel <= ipl)
841 			break;
842 		prep = p;
843 	}
844 	if (prep != NULL) {
845 		if (prep->av_vector == NULL) {	/* freed struct available */
846 			INIT_AUTOVEC(prep, intr_id, f, arg1, arg2,
847 			    ticksp, ipl, dip);
848 			prep->av_flags = mem->av_flags;
849 			kmem_free(mem, sizeof (struct autovec));
850 			return;
851 		}
852 
853 		mem->av_link = prep->av_link;
854 		prep->av_link = mem;
855 	} else {
856 		/* insert new intpt at beginning of chain */
857 		mem->av_link = vecp->v_autovect;
858 		vecp->v_autovect = mem;
859 	}
860 }
861 
862 /*
863  * After having made a change to an autovector list, wait until we have
864  * seen the specified cpu not executing an interrupt at that level--so we
865  * know our change has taken effect completely (no old state in registers,
866  * etc).
867  */
868 #define	APIX_CPU_ENABLED(_cp) \
869 	(quiesce_active == 0 && \
870 	(((_cp)->cpu_flags & (CPU_QUIESCED|CPU_OFFLINE)) == 0))
871 
872 static void
873 apix_wait_till_seen(processorid_t cpuid, int ipl)
874 {
875 	struct cpu *cp = cpu[cpuid];
876 
877 	if (cp == NULL || LOCAL_WITH_INTR_DISABLED(cpuid))
878 		return;
879 
880 	/*
881 	 * Don't wait if the CPU is quiesced or offlined. This can happen
882 	 * when a CPU is running pause thread but hardware triggered an
883 	 * interrupt and the interrupt gets queued.
884 	 */
885 	for (;;) {
886 		if (!INTR_ACTIVE((volatile struct cpu *)cpu[cpuid], ipl) &&
887 		    (!APIX_CPU_ENABLED(cp) ||
888 		    !INTR_PENDING((volatile apix_impl_t *)apixs[cpuid], ipl)))
889 			return;
890 	}
891 }
892 
893 static void
894 apix_remove_av(apix_vector_t *vecp, struct autovec *target)
895 {
896 	int hi_pri = 0;
897 	struct autovec *p;
898 
899 	if (target == NULL)
900 		return;
901 
902 	APIC_VERBOSE(INTR, (CE_CONT, "apix_remove_av: dip %p, vector 0x%x, "
903 	    "cpu %d\n", (void *)target->av_dip, vecp->v_vector, vecp->v_cpuid));
904 
905 	for (p = vecp->v_autovect; p; p = p->av_link) {
906 		if (p == target || p->av_vector == NULL)
907 			continue;
908 		hi_pri = (p->av_prilevel > hi_pri) ? p->av_prilevel : hi_pri;
909 	}
910 
911 	vecp->v_share--;
912 	vecp->v_pri = hi_pri;
913 
914 	/*
915 	 * This drops the handler from the chain, it can no longer be called.
916 	 * However, there is no guarantee that the handler is not currently
917 	 * still executing.
918 	 */
919 	target->av_vector = NULL;
920 	/*
921 	 * There is a race where we could be just about to pick up the ticksp
922 	 * pointer to increment it after returning from the service routine
923 	 * in av_dispatch_autovect.  Rather than NULL it out let's just point
924 	 * it off to something safe so that any final tick update attempt
925 	 * won't fault.
926 	 */
927 	target->av_ticksp = &dummy_tick;
928 	apix_wait_till_seen(vecp->v_cpuid, target->av_prilevel);
929 }
930 
931 static struct autovec *
932 apix_find_av(apix_vector_t *vecp, void *intr_id, avfunc f)
933 {
934 	struct autovec *p;
935 
936 	for (p = vecp->v_autovect; p; p = p->av_link) {
937 		if ((p->av_vector == f) && (p->av_intr_id == intr_id)) {
938 			/* found the handler */
939 			return (p);
940 		}
941 	}
942 
943 	return (NULL);
944 }
945 
946 static apix_vector_t *
947 apix_find_vector_by_avintr(void *intr_id, avfunc f)
948 {
949 	apix_vector_t *vecp;
950 	processorid_t n;
951 	uchar_t v;
952 
953 	for (n = 0; n < apic_nproc; n++) {
954 		if (!apix_is_cpu_enabled(n))
955 			continue;
956 
957 		for (v = APIX_AVINTR_MIN; v <= APIX_AVINTR_MAX; v++) {
958 			vecp = xv_vector(n, v);
959 			if (vecp == NULL ||
960 			    vecp->v_state <= APIX_STATE_OBSOLETED)
961 				continue;
962 
963 			if (apix_find_av(vecp, intr_id, f) != NULL)
964 				return (vecp);
965 		}
966 	}
967 
968 	return (NULL);
969 }
970 
971 /*
972  * Add interrupt service routine.
973  *
974  * For legacy interrupts (HPET timer, ACPI SCI), the vector is actually
975  * an IRQ number, and a vector is then allocated. Otherwise, the vector is
976  * already allocated. The input argument virt_vect is a virtual vector in
977  * the format APIX_VIRTVECTOR(cpuid, vector).
978  *
979  * Return 1 on success, 0 on failure.
980  */
981 int
982 apix_add_avintr(void *intr_id, int ipl, avfunc xxintr, char *name,
983     int virt_vect, caddr_t arg1, caddr_t arg2, uint64_t *ticksp,
984     dev_info_t *dip)
985 {
986 	int cpuid;
987 	uchar_t v = (uchar_t)APIX_VIRTVEC_VECTOR(virt_vect);
988 	apix_vector_t *vecp;
989 
990 	if (xxintr == NULL) {
991 		cmn_err(CE_WARN, "Attempt to add null for %s "
992 		    "on vector 0x%x,0x%x", name,
993 		    APIX_VIRTVEC_CPU(virt_vect),
994 		    APIX_VIRTVEC_VECTOR(virt_vect));
995 		return (0);
996 	}
997 
998 	if (v >= APIX_IPI_MIN)	/* IPIs */
999 		return (apix_add_ipi(ipl, xxintr, name, v, arg1, arg2));
1000 
1001 	if (!APIX_IS_VIRTVEC(virt_vect)) {	/* got irq */
1002 		int irqno = virt_vect;
1003 		int inum = GET_INTR_INUM(intr_id);
1004 
1005 		/*
1006 		 * Scenarios include:
1007 		 * a. add_avintr() is called before irqp is initialized (legacy)
1008 		 * b. irqp is initialized, vector is not allocated (fixed)
1009 		 * c. irqp is initialized, vector is allocated (fixed & shared)
1010 		 */
1011 		if ((vecp = apix_alloc_intx(dip, inum, irqno)) == NULL)
1012 			return (0);
1013 
1014 		cpuid = vecp->v_cpuid;
1015 		v = vecp->v_vector;
1016 		virt_vect = APIX_VIRTVECTOR(cpuid, v);
1017 	} else {	/* got virtual vector */
1018 		cpuid = APIX_VIRTVEC_CPU(virt_vect);
1019 		vecp = xv_vector(cpuid, v);
1020 		ASSERT(vecp != NULL);
1021 	}
1022 
1023 	lock_set(&apix_lock);
1024 	if (vecp->v_state <= APIX_STATE_OBSOLETED) {
1025 		vecp = NULL;
1026 
1027 		/*
1028 		 * Basically the allocated but not enabled interrupts
1029 		 * will not get re-targeted. But MSIs in allocated state
1030 		 * could be re-targeted due to group re-targeting.
1031 		 */
1032 		if (intr_id != NULL && dip != NULL) {
1033 			ddi_intr_handle_impl_t *hdlp = intr_id;
1034 			vecp = apix_get_dev_map(dip, hdlp->ih_inum,
1035 			    hdlp->ih_type);
1036 			ASSERT(vecp->v_state == APIX_STATE_ALLOCED);
1037 		}
1038 		if (vecp == NULL) {
1039 			lock_clear(&apix_lock);
1040 			cmn_err(CE_WARN, "Invalid interrupt 0x%x,0x%x "
1041 			    " for %p to add", cpuid, v, intr_id);
1042 			return (0);
1043 		}
1044 		cpuid = vecp->v_cpuid;
1045 		virt_vect = APIX_VIRTVECTOR(cpuid, vecp->v_vector);
1046 	}
1047 
1048 	APIX_ENTER_CPU_LOCK(cpuid);
1049 	apix_insert_av(vecp, intr_id, xxintr, arg1, arg2, ticksp, ipl, dip);
1050 	APIX_LEAVE_CPU_LOCK(cpuid);
1051 
1052 	(void) apix_addspl(virt_vect, ipl, 0, 0);
1053 
1054 	lock_clear(&apix_lock);
1055 
1056 	return (1);
1057 }
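/*
 * For orientation, a sketch of the virtual-vector packing used above:
 * APIX_VIRTVECTOR() combines a cpu id and a hardware vector into a single
 * integer, APIX_VIRTVEC_CPU() and APIX_VIRTVEC_VECTOR() take it apart, and
 * APIX_IS_VIRTVEC() distinguishes it from a bare IRQ number. The flag bit
 * and field widths below are illustrative assumptions; the authoritative
 * definitions live in <sys/apix.h>.
 */
#define	XV_FLAG			0x10000000	/* assumed marker bit */
#define	XV_PACK(cpuid, v)	(XV_FLAG | ((cpuid) << 8) | ((v) & 0xFF))
#define	XV_CPU(vv)		((((uint32_t)(vv)) & ~XV_FLAG) >> 8)
#define	XV_VECTOR(vv)		((vv) & 0xFF)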
1058 
1059 /*
1060  * Remove avintr
1061  *
1062  * For fixed, if it's the last one of shared interrupts, free the vector.
1063  * For msi/x, only disable the interrupt but not free the vector, which
1064  * is freed by PSM_XXX_FREE_XXX.
1065  */
1066 void
1067 apix_rem_avintr(void *intr_id, int ipl, avfunc xxintr, int virt_vect)
1068 {
1069 	avfunc f;
1070 	apix_vector_t *vecp;
1071 	struct autovec *avp;
1072 	processorid_t cpuid;
1073 
1074 	if ((f = xxintr) == NULL)
1075 		return;
1076 
1077 	lock_set(&apix_lock);
1078 
1079 	if (!APIX_IS_VIRTVEC(virt_vect)) {	/* got irq */
1080 		vecp = apix_intx_get_vector(virt_vect);
1081 		virt_vect = APIX_VIRTVECTOR(vecp->v_cpuid, vecp->v_vector);
1082 	} else	/* got virtual vector */
1083 		vecp = xv_vector(APIX_VIRTVEC_CPU(virt_vect),
1084 		    APIX_VIRTVEC_VECTOR(virt_vect));
1085 
1086 	if (vecp == NULL) {
1087 		lock_clear(&apix_lock);
1088 		cmn_err(CE_CONT, "Invalid interrupt 0x%x,0x%x to remove",
1089 		    APIX_VIRTVEC_CPU(virt_vect),
1090 		    APIX_VIRTVEC_VECTOR(virt_vect));
1091 		return;
1092 	}
1093 
1094 	if (vecp->v_state <= APIX_STATE_OBSOLETED ||
1095 	    ((avp = apix_find_av(vecp, intr_id, f)) == NULL)) {
1096 		/*
1097 		 * It's possible that the interrupt is rebound to a
1098 		 * different cpu before rem_avintr() is called. Search
1099 		 * through all vectors once it happens.
1100 		 */
1101 		if ((vecp = apix_find_vector_by_avintr(intr_id, f))
1102 		    == NULL) {
1103 			lock_clear(&apix_lock);
1104 			cmn_err(CE_CONT, "Unknown interrupt 0x%x,0x%x "
1105 			    "for %p to remove", APIX_VIRTVEC_CPU(virt_vect),
1106 			    APIX_VIRTVEC_VECTOR(virt_vect), intr_id);
1107 			return;
1108 		}
1109 		virt_vect = APIX_VIRTVECTOR(vecp->v_cpuid, vecp->v_vector);
1110 		avp = apix_find_av(vecp, intr_id, f);
1111 	}
1112 	cpuid = vecp->v_cpuid;
1113 
1114 	/* disable interrupt */
1115 	(void) apix_delspl(virt_vect, ipl, 0, 0);
1116 
1117 	/* remove ISR entry */
1118 	APIX_ENTER_CPU_LOCK(cpuid);
1119 	apix_remove_av(vecp, avp);
1120 	APIX_LEAVE_CPU_LOCK(cpuid);
1121 
1122 	lock_clear(&apix_lock);
1123 }
1124 
1125 /*
1126  * Device to vector mapping table
1127  */
1128 
1129 static void
1130 apix_clear_dev_map(dev_info_t *dip, int inum, int type)
1131 {
1132 	char *name;
1133 	major_t major;
1134 	apix_dev_vector_t *dvp, *prev = NULL;
1135 	int found = 0;
1136 
1137 	name = ddi_get_name(dip);
1138 	major = ddi_name_to_major(name);
1139 
1140 	mutex_enter(&apix_mutex);
1141 
1142 	for (dvp = apix_dev_vector[major]; dvp != NULL;
1143 	    prev = dvp, dvp = dvp->dv_next) {
1144 		if (dvp->dv_dip == dip && dvp->dv_inum == inum &&
1145 		    dvp->dv_type == type) {
1146 			found++;
1147 			break;
1148 		}
1149 	}
1150 
1151 	if (!found) {
1152 		mutex_exit(&apix_mutex);
1153 		return;
1154 	}
1155 
1156 	if (prev != NULL)
1157 		prev->dv_next = dvp->dv_next;
1158 
1159 	if (apix_dev_vector[major] == dvp)
1160 		apix_dev_vector[major] = dvp->dv_next;
1161 
1162 	dvp->dv_vector->v_devp = NULL;
1163 
1164 	mutex_exit(&apix_mutex);
1165 
1166 	kmem_free(dvp, sizeof (apix_dev_vector_t));
1167 }
1168 
1169 void
1170 apix_set_dev_map(apix_vector_t *vecp, dev_info_t *dip, int inum)
1171 {
1172 	apix_dev_vector_t *dvp;
1173 	char *name;
1174 	major_t major;
1175 	uint32_t found = 0;
1176 
1177 	ASSERT(dip != NULL);
1178 	name = ddi_get_name(dip);
1179 	major = ddi_name_to_major(name);
1180 
1181 	mutex_enter(&apix_mutex);
1182 
1183 	for (dvp = apix_dev_vector[major]; dvp != NULL;
1184 	    dvp = dvp->dv_next) {
1185 		if (dvp->dv_dip == dip && dvp->dv_inum == inum &&
1186 		    dvp->dv_type == vecp->v_type) {
1187 			found++;
1188 			break;
1189 		}
1190 	}
1191 
1192 	if (found == 0) {	/* not found */
1193 		dvp = kmem_zalloc(sizeof (apix_dev_vector_t), KM_SLEEP);
1194 		dvp->dv_dip = dip;
1195 		dvp->dv_inum = inum;
1196 		dvp->dv_type = vecp->v_type;
1197 
1198 		dvp->dv_next = apix_dev_vector[major];
1199 		apix_dev_vector[major] = dvp;
1200 	}
1201 	dvp->dv_vector = vecp;
1202 	vecp->v_devp = dvp;
1203 
1204 	mutex_exit(&apix_mutex);
1205 
1206 	DDI_INTR_IMPLDBG((CE_CONT, "apix_set_dev_map: dip=0x%p "
1207 	    "inum=0x%x  vector=0x%x/0x%x\n",
1208 	    (void *)dip, inum, vecp->v_cpuid, vecp->v_vector));
1209 }
1210 
1211 apix_vector_t *
1212 apix_get_dev_map(dev_info_t *dip, int inum, int type)
1213 {
1214 	char *name;
1215 	major_t major;
1216 	apix_dev_vector_t *dvp;
1217 	apix_vector_t *vecp;
1218 
1219 	name = ddi_get_name(dip);
1220 	if ((major = ddi_name_to_major(name)) == DDI_MAJOR_T_NONE)
1221 		return (NULL);
1222 
1223 	mutex_enter(&apix_mutex);
1224 	for (dvp = apix_dev_vector[major]; dvp != NULL;
1225 	    dvp = dvp->dv_next) {
1226 		if (dvp->dv_dip == dip && dvp->dv_inum == inum &&
1227 		    dvp->dv_type == type) {
1228 			vecp = dvp->dv_vector;
1229 			mutex_exit(&apix_mutex);
1230 			return (vecp);
1231 		}
1232 	}
1233 	mutex_exit(&apix_mutex);
1234 
1235 	return (NULL);
1236 }
1237 
1238 /*
1239  * Get minimum inum for specified device, used for MSI
1240  */
1241 int
1242 apix_get_min_dev_inum(dev_info_t *dip, int type)
1243 {
1244 	char *name;
1245 	major_t major;
1246 	apix_dev_vector_t *dvp;
1247 	int inum = -1;
1248 
1249 	name = ddi_get_name(dip);
1250 	major = ddi_name_to_major(name);
1251 
1252 	mutex_enter(&apix_mutex);
1253 	for (dvp = apix_dev_vector[major]; dvp != NULL;
1254 	    dvp = dvp->dv_next) {
1255 		if (dvp->dv_dip == dip && dvp->dv_type == type) {
1256 			if (inum == -1)
1257 				inum = dvp->dv_inum;
1258 			else
1259 				inum = (dvp->dv_inum < inum) ?
1260 				    dvp->dv_inum : inum;
1261 		}
1262 	}
1263 	mutex_exit(&apix_mutex);
1264 
1265 	return (inum);
1266 }
1267 
1268 int
1269 apix_get_max_dev_inum(dev_info_t *dip, int type)
1270 {
1271 	char *name;
1272 	major_t major;
1273 	apix_dev_vector_t *dvp;
1274 	int inum = -1;
1275 
1276 	name = ddi_get_name(dip);
1277 	major = ddi_name_to_major(name);
1278 
1279 	mutex_enter(&apix_mutex);
1280 	for (dvp = apix_dev_vector[major]; dvp != NULL;
1281 	    dvp = dvp->dv_next) {
1282 		if (dvp->dv_dip == dip && dvp->dv_type == type) {
1283 			if (inum == -1)
1284 				inum = dvp->dv_inum;
1285 			else
1286 				inum = (dvp->dv_inum > inum) ?
1287 				    dvp->dv_inum : inum;
1288 		}
1289 	}
1290 	mutex_exit(&apix_mutex);
1291 
1292 	return (inum);
1293 }
1294 
1295 /*
1296  * Major to cpu binding, for INTR_ROUND_ROBIN_WITH_AFFINITY cpu
1297  * binding policy
1298  */
1299 
1300 static uint32_t
1301 apix_get_dev_binding(dev_info_t *dip)
1302 {
1303 	major_t major;
1304 	char *name;
1305 	uint32_t cpu = IRQ_UNINIT;
1306 
1307 	name = ddi_get_name(dip);
1308 	major = ddi_name_to_major(name);
1309 	if (major < devcnt) {
1310 		mutex_enter(&apix_mutex);
1311 		cpu = apix_major_to_cpu[major];
1312 		mutex_exit(&apix_mutex);
1313 	}
1314 
1315 	return (cpu);
1316 }
1317 
1318 static void
1319 apix_set_dev_binding(dev_info_t *dip, uint32_t cpu)
1320 {
1321 	major_t major;
1322 	char *name;
1323 
1324 	/* setup major to cpu mapping */
1325 	name = ddi_get_name(dip);
1326 	major = ddi_name_to_major(name);
1327 	if (apix_major_to_cpu[major] == IRQ_UNINIT) {
1328 		mutex_enter(&apix_mutex);
1329 		apix_major_to_cpu[major] = cpu;
1330 		mutex_exit(&apix_mutex);
1331 	}
1332 }
1333 
1334 /*
1335  * return the cpu to which this intr should be bound.
1336  * Check properties or any other mechanism to see if user wants it
1337  * bound to a specific CPU. If so, return the cpu id with high bit set.
1338  * If not, use the policy to choose a cpu and return the id.
1339  */
1340 uint32_t
1341 apix_bind_cpu(dev_info_t *dip)
1342 {
1343 	int	instance, instno, prop_len, bind_cpu, count;
1344 	uint_t	i, rc;
1345 	major_t	major;
1346 	char	*name, *drv_name, *prop_val, *cptr;
1347 	char	prop_name[32];
1348 
1349 	lock_set(&apix_lock);
1350 
1351 	if (apic_intr_policy == INTR_LOWEST_PRIORITY) {
1352 		cmn_err(CE_WARN, "apix: unsupported interrupt binding policy "
1353 		    "LOWEST PRIORITY, use ROUND ROBIN instead");
1354 		apic_intr_policy = INTR_ROUND_ROBIN;
1355 	}
1356 
1357 	if (apic_nproc == 1) {
1358 		lock_clear(&apix_lock);
1359 		return (0);
1360 	}
1361 
1362 	drv_name = NULL;
1363 	rc = DDI_PROP_NOT_FOUND;
1364 	major = (major_t)-1;
1365 	if (dip != NULL) {
1366 		name = ddi_get_name(dip);
1367 		major = ddi_name_to_major(name);
1368 		drv_name = ddi_major_to_name(major);
1369 		instance = ddi_get_instance(dip);
1370 		if (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) {
1371 			bind_cpu = apix_get_dev_binding(dip);
1372 			if (bind_cpu != IRQ_UNINIT) {
1373 				lock_clear(&apix_lock);
1374 				return (bind_cpu);
1375 			}
1376 		}
1377 		/*
1378 		 * Search for the "drvname"_intpt_bind_cpus property first. The
1379 		 * syntax of the property should be "a[,b,c,...]" where
1380 		 * instance 0 binds to cpu a, instance 1 binds to cpu b,
1381 		 * instance 2 binds to cpu c...
1382 		 * ddi_getlongprop() will search /option first, then /.
1383 		 * If "drvname"_intpt_bind_cpus doesn't exist, fall back to the
1384 		 * intpt_bind_cpus property.  The syntax is the same, and it
1385 		 * applies to all devices that lack a "drvname"-specific
1386 		 * property.
1387 		 */
1388 		(void) strcpy(prop_name, drv_name);
1389 		(void) strcat(prop_name, "_intpt_bind_cpus");
1390 		rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, 0, prop_name,
1391 		    (caddr_t)&prop_val, &prop_len);
1392 		if (rc != DDI_PROP_SUCCESS) {
1393 			rc = ddi_getlongprop(DDI_DEV_T_ANY, dip, 0,
1394 			    "intpt_bind_cpus", (caddr_t)&prop_val, &prop_len);
1395 		}
1396 	}
1397 	if (rc == DDI_PROP_SUCCESS) {
1398 		for (i = count = 0; i < (prop_len - 1); i++)
1399 			if (prop_val[i] == ',')
1400 				count++;
1401 		if (prop_val[i-1] != ',')
1402 			count++;
1403 		/*
1404 		 * If the property defines fewer binding entries than
1405 		 * this device's instance number, wrap around and
1406 		 * reuse the pattern from the beginning (instance
1407 		 * modulo count).
1408 		 */
1409 		instno = instance % count;
1410 		i = 0;
1411 		cptr = prop_val;
1412 		while (i < instno)
1413 			if (*cptr++ == ',')
1414 				i++;
1415 		bind_cpu = stoi(&cptr);
1416 		/* if specific cpu is bogus, then default to cpu 0 */
1417 		if (bind_cpu >= apic_nproc) {
1418 			cmn_err(CE_WARN, "apix: %s=%s: CPU %d not present",
1419 			    prop_name, prop_val, bind_cpu);
1420 			bind_cpu = 0;
1421 		} else {
1422 			/* indicate that we are bound at user request */
1423 			bind_cpu |= IRQ_USER_BOUND;
1424 		}
1425 		kmem_free(prop_val, prop_len);	/* freed after last use */
1426 		/*
1427 		 * no need to check apic_cpus[].aci_status, if specific cpu is
1428 		 * not up, then post_cpu_start will handle it.
1429 		 */
1430 	} else {
1431 		bind_cpu = apic_get_next_bind_cpu();
1432 	}
1433 
1434 	lock_clear(&apix_lock);
1435 
1436 	return ((uint32_t)bind_cpu);
1437 }
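/*
 * A worked example of the parsing above, for a hypothetical driver "foo"
 * with foo_intpt_bind_cpus="1,3,5": count is 3, so instance 0 binds to
 * cpu 1, instance 1 to cpu 3, instance 2 to cpu 5, and instance 4 wraps
 * around (4 % 3 == 1) to cpu 3. A user-land sketch of the walk, with atoi()
 * standing in for the kernel's stoi():
 */
static int
pick_bind_cpu(const char *prop_val, int instance, int count)
{
	const char *cptr = prop_val;
	int i = 0, instno = instance % count;	/* wrap around */

	while (i < instno)
		if (*cptr++ == ',')
			i++;
	return (atoi(cptr));	/* parses up to the next ',' */
}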
1438 
1439 static boolean_t
1440 apix_is_cpu_enabled(processorid_t cpuid)
1441 {
1442 	apic_cpus_info_t *cpu_infop;
1443 
1444 	cpu_infop = &apic_cpus[cpuid];
1445 
1446 	if ((cpu_infop->aci_status & APIC_CPU_INTR_ENABLE) == 0)
1447 		return (B_FALSE);
1448 
1449 	return (B_TRUE);
1450 }
1451 
1452 /*
1453  * Must be called with apix_lock held. This function can be
1454  * called from above lock level by apix_intr_redistribute().
1455  *
1456  * Arguments:
1457  *    vecp  : Vector to be rebound
1458  *    tocpu : Target cpu. IRQ_UNINIT means target is vecp->v_cpuid.
1459  *    count : Number of contiguous vectors
1460  *
1461  * Return the new vector to which the interrupt is bound
1462  */
1463 apix_vector_t *
1464 apix_rebind(apix_vector_t *vecp, processorid_t newcpu, int count)
1465 {
1466 	apix_vector_t *newp, *oldp;
1467 	processorid_t oldcpu = vecp->v_cpuid;
1468 	uchar_t newvec, oldvec = vecp->v_vector;
1469 	int i;
1470 
1471 	ASSERT(LOCK_HELD(&apix_lock) && count > 0);
1472 
1473 	if (!apix_is_cpu_enabled(newcpu))
1474 		return (NULL);
1475 
1476 	if (vecp->v_cpuid == newcpu)	/* rebind to the same cpu */
1477 		return (vecp);
1478 
1479 	APIX_ENTER_CPU_LOCK(oldcpu);
1480 	APIX_ENTER_CPU_LOCK(newcpu);
1481 
1482 	/* allocate vector */
1483 	if (count == 1)
1484 		newp = apix_alloc_vector_oncpu(newcpu, NULL, 0, vecp->v_type);
1485 	else {
1486 		ASSERT(vecp->v_type == APIX_TYPE_MSI);
1487 		newp = apix_alloc_nvectors_oncpu(newcpu, NULL, 0, count,
1488 		    vecp->v_type);
1489 	}
1490 	if (newp == NULL) {
1491 		APIX_LEAVE_CPU_LOCK(newcpu);
1492 		APIX_LEAVE_CPU_LOCK(oldcpu);
1493 		return (NULL);
1494 	}
1495 
1496 	newvec = newp->v_vector;
1497 	apix_dup_vectors(vecp, newp, count);
1498 
1499 	APIX_LEAVE_CPU_LOCK(newcpu);
1500 	APIX_LEAVE_CPU_LOCK(oldcpu);
1501 
1502 	if (!DDI_INTR_IS_MSI_OR_MSIX(vecp->v_type)) {
1503 		ASSERT(count == 1);
1504 		if (apix_intx_rebind(vecp->v_inum, newcpu, newvec) != 0) {
1505 			struct autovec *avp;
1506 			int inum;
1507 
1508 			/* undo duplication */
1509 			APIX_ENTER_CPU_LOCK(oldcpu);
1510 			APIX_ENTER_CPU_LOCK(newcpu);
1511 			for (avp = newp->v_autovect; avp != NULL;
1512 			    avp = avp->av_link) {
1513 				if (avp->av_dip != NULL) {
1514 					inum = GET_INTR_INUM(avp->av_intr_id);
1515 					apix_set_dev_map(vecp, avp->av_dip,
1516 					    inum);
1517 				}
1518 				apix_remove_av(newp, avp);
1519 			}
1520 			apix_cleanup_vector(newp);
1521 			APIX_LEAVE_CPU_LOCK(newcpu);
1522 			APIX_LEAVE_CPU_LOCK(oldcpu);
1523 			APIC_VERBOSE(REBIND, (CE_CONT, "apix: rebind fixed "
1524 			    "interrupt 0x%x to cpu %d failed\n",
1525 			    vecp->v_inum, newcpu));
1526 			return (NULL);
1527 		}
1528 
1529 		APIX_ENTER_CPU_LOCK(oldcpu);
1530 		(void) apix_obsolete_vector(vecp);
1531 		APIX_LEAVE_CPU_LOCK(oldcpu);
1532 		APIC_VERBOSE(REBIND, (CE_CONT, "apix: rebind fixed interrupt"
1533 		    " 0x%x/0x%x to 0x%x/0x%x\n",
1534 		    oldcpu, oldvec, newcpu, newvec));
1535 		return (newp);
1536 	}
1537 
1538 	for (i = 0; i < count; i++) {
1539 		oldp = xv_vector(oldcpu, oldvec + i);
1540 		newp = xv_vector(newcpu, newvec + i);
1541 
1542 		if (newp->v_share > 0) {
1543 			APIX_SET_REBIND_INFO(oldp, newp);
1544 
1545 			apix_enable_vector(newp);
1546 
1547 			APIX_CLR_REBIND_INFO();
1548 		}
1549 
1550 		APIX_ENTER_CPU_LOCK(oldcpu);
1551 		(void) apix_obsolete_vector(oldp);
1552 		APIX_LEAVE_CPU_LOCK(oldcpu);
1553 	}
1554 	APIC_VERBOSE(REBIND, (CE_CONT, "apix: rebind vector 0x%x/0x%x "
1555 	    "to 0x%x/0x%x, count=%d\n",
1556 	    oldcpu, oldvec, newcpu, newvec, count));
1557 
1558 	return (xv_vector(newcpu, newvec));
1559 }
1560 
1561 /*
1562  * Scenarios include:
1563  * a. add_avintr() is called before irqp is initialized (legacy)
1564  * b. irqp is initialized, vector is not allocated (fixed interrupts)
1565  * c. irqp is initialized, vector is allocated (shared interrupts)
1566  */
1567 apix_vector_t *
1568 apix_alloc_intx(dev_info_t *dip, int inum, int irqno)
1569 {
1570 	apic_irq_t *irqp;
1571 	apix_vector_t *vecp;
1572 
1573 	/*
1574 	 * Allocate IRQ. Caller is later responsible for the
1575 	 * initialization
1576 	 */
1577 	mutex_enter(&airq_mutex);
1578 	if ((irqp = apic_irq_table[irqno]) == NULL) {
1579 		/* allocate irq */
1580 		irqp = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP);
1581 		irqp->airq_mps_intr_index = FREE_INDEX;
1582 		apic_irq_table[irqno] = irqp;
1583 	}
1584 	if (irqp->airq_mps_intr_index == FREE_INDEX) {
1585 		irqp->airq_mps_intr_index = DEFAULT_INDEX;
1586 		irqp->airq_cpu = IRQ_UNINIT;
1587 		irqp->airq_origirq = (uchar_t)irqno;
1588 	}
1589 
1590 	mutex_exit(&airq_mutex);
1591 
1592 	/*
1593 	 * allocate vector
1594 	 */
1595 	if (irqp->airq_cpu == IRQ_UNINIT) {
1596 		uint32_t bindcpu, cpuid;
1597 
1598 		/* select cpu by system policy */
1599 		bindcpu = apix_bind_cpu(dip);
1600 		cpuid = bindcpu & ~IRQ_USER_BOUND;
1601 
1602 		/* allocate vector */
1603 		APIX_ENTER_CPU_LOCK(cpuid);
1604 
1605 		if ((vecp = apix_alloc_vector_oncpu(bindcpu, dip, inum,
1606 		    APIX_TYPE_FIXED)) == NULL) {
1607 			cmn_err(CE_WARN, "No interrupt vector for irq %x",
1608 			    irqno);
1609 			APIX_LEAVE_CPU_LOCK(cpuid);
1610 			return (NULL);
1611 		}
1612 		vecp->v_inum = irqno;
1613 		vecp->v_flags |= APIX_VECT_MASKABLE;
1614 
1615 		apix_intx_set_vector(irqno, vecp->v_cpuid, vecp->v_vector);
1616 
1617 		APIX_LEAVE_CPU_LOCK(cpuid);
1618 	} else {
1619 		vecp = xv_vector(irqp->airq_cpu, irqp->airq_vector);
1620 		ASSERT(!IS_VECT_FREE(vecp));
1621 
1622 		if (dip != NULL)
1623 			apix_set_dev_map(vecp, dip, inum);
1624 	}
1625 
1626 	if ((dip != NULL) &&
1627 	    (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) &&
1628 	    ((vecp->v_flags & APIX_VECT_USER_BOUND) == 0))
1629 		apix_set_dev_binding(dip, vecp->v_cpuid);
1630 
1631 	apix_dprint_vector(vecp, dip, 1);
1632 
1633 	return (vecp);
1634 }
1635 
1636 int
1637 apix_alloc_msi(dev_info_t *dip, int inum, int count, int behavior)
1638 {
1639 	int i, cap_ptr, rcount = count;
1640 	apix_vector_t *vecp;
1641 	processorid_t bindcpu, cpuid;
1642 	ushort_t msi_ctrl;
1643 	ddi_acc_handle_t handle;
1644 
1645 	DDI_INTR_IMPLDBG((CE_CONT, "apix_alloc_msi_vectors: dip=0x%p "
1646 	    "inum=0x%x  count=0x%x behavior=%d\n",
1647 	    (void *)dip, inum, count, behavior));
1648 
1649 	if (count > 1) {
1650 		if (behavior == DDI_INTR_ALLOC_STRICT &&
1651 		    apic_multi_msi_enable == 0)
1652 			return (0);
1653 		if (apic_multi_msi_enable == 0)
1654 			count = 1;
1655 	}
1656 
1657 	/* Check whether it supports per-vector masking */
1658 	cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
1659 	handle = i_ddi_get_pci_config_handle(dip);
1660 	msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
1661 
1662 	/* bind to cpu */
1663 	bindcpu = apix_bind_cpu(dip);
1664 	cpuid = bindcpu & ~IRQ_USER_BOUND;
1665 
1666 	/* if not ISP2, then round it down */
1667 	if (!ISP2(rcount))
1668 		rcount = 1 << (highbit(rcount) - 1);
1669 
1670 	APIX_ENTER_CPU_LOCK(cpuid);
1671 	for (vecp = NULL; rcount > 0; rcount >>= 1) {
1672 		vecp = apix_alloc_nvectors_oncpu(bindcpu, dip, inum, rcount,
1673 		    APIX_TYPE_MSI);
1674 		if (vecp != NULL || behavior == DDI_INTR_ALLOC_STRICT)
1675 			break;
1676 	}
1677 	for (i = 0; vecp && i < rcount; i++)
1678 		xv_vector(vecp->v_cpuid, vecp->v_vector + i)->v_flags |=
1679 		    (msi_ctrl & PCI_MSI_PVM_MASK) ? APIX_VECT_MASKABLE : 0;
1680 	APIX_LEAVE_CPU_LOCK(cpuid);
1681 	if (vecp == NULL) {
1682 		APIC_VERBOSE(INTR, (CE_CONT,
1683 		    "apix_alloc_msi: no %d cont vectors found on cpu 0x%x\n",
1684 		    count, bindcpu));
1685 		return (0);
1686 	}
1687 
1688 	/* major to cpu binding */
1689 	if ((apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) &&
1690 	    ((vecp->v_flags & APIX_VECT_USER_BOUND) == 0))
1691 		apix_set_dev_binding(dip, vecp->v_cpuid);
1692 
1693 	apix_dprint_vector(vecp, dip, rcount);
1694 
1695 	return (rcount);
1696 }
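/*
 * A sketch of the count normalization above: MSI grants must be a power of
 * two, so a non-power-of-2 request is first rounded down to the nearest one
 * (highbit() returns the 1-based index of the highest set bit), and the
 * allocation loop then halves rcount until a contiguous block is found.
 */
static int
round_down_pow2(int rcount)
{
	return (ISP2(rcount) ? rcount : 1 << (highbit(rcount) - 1));
}
/* e.g. 5 -> 4, 6 -> 4, 9 -> 8; a failed request for 8 retries as 4, 2, 1 */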
1697 
1698 int
1699 apix_alloc_msix(dev_info_t *dip, int inum, int count, int behavior)
1700 {
1701 	apix_vector_t *vecp;
1702 	processorid_t bindcpu, cpuid;
1703 	int i;
1704 
1705 	for (i = 0; i < count; i++) {
1706 		/* select cpu by system policy */
1707 		bindcpu = apix_bind_cpu(dip);
1708 		cpuid = bindcpu & ~IRQ_USER_BOUND;
1709 
1710 		/* allocate vector */
1711 		APIX_ENTER_CPU_LOCK(cpuid);
1712 		if ((vecp = apix_alloc_vector_oncpu(bindcpu, dip, inum + i,
1713 		    APIX_TYPE_MSIX)) == NULL) {
1714 			APIX_LEAVE_CPU_LOCK(cpuid);
1715 			APIC_VERBOSE(INTR, (CE_CONT, "apix_alloc_msix: "
1716 			    "allocate msix for device dip=%p, inum=%d on"
1717 			    " cpu %d failed", (void *)dip, inum + i, bindcpu));
1718 			break;
1719 		}
1720 		vecp->v_flags |= APIX_VECT_MASKABLE;
1721 		APIX_LEAVE_CPU_LOCK(cpuid);
1722 
1723 		/* major to cpu mapping */
1724 		if ((i == 0) &&
1725 		    (apic_intr_policy == INTR_ROUND_ROBIN_WITH_AFFINITY) &&
1726 		    ((vecp->v_flags & APIX_VECT_USER_BOUND) == 0))
1727 			apix_set_dev_binding(dip, vecp->v_cpuid);
1728 
1729 		apix_dprint_vector(vecp, dip, 1);
1730 	}
1731 
1732 	if (i < count && behavior == DDI_INTR_ALLOC_STRICT) {
1733 		APIC_VERBOSE(INTR, (CE_WARN, "apix_alloc_msix: "
1734 		    "strictly allocate %d vectors failed, got %d\n",
1735 		    count, i));
1736 		apix_free_vectors(dip, inum, i, APIX_TYPE_MSIX);
1737 		i = 0;
1738 	}
1739 
1740 	return (i);
1741 }
1742 
1743 /*
1744  * A rollback free for vectors allocated by apix_alloc_xxx().
1745  */
1746 void
1747 apix_free_vectors(dev_info_t *dip, int inum, int count, int type)
1748 {
1749 	int i, cpuid;
1750 	apix_vector_t *vecp;
1751 
1752 	DDI_INTR_IMPLDBG((CE_CONT, "apix_free_vectors: dip: %p inum: %x "
1753 	    "count: %x type: %x\n",
1754 	    (void *)dip, inum, count, type));
1755 
1756 	lock_set(&apix_lock);
1757 
1758 	for (i = 0; i < count; i++, inum++) {
1759 		if ((vecp = apix_get_dev_map(dip, inum, type)) == NULL) {
1760 			lock_clear(&apix_lock);
1761 			DDI_INTR_IMPLDBG((CE_CONT, "apix_free_vectors: "
1762 			    "dip=0x%p inum=0x%x type=0x%x apix_find_intr() "
1763 			    "failed\n", (void *)dip, inum, type));
1764 			continue;
1765 		}
1766 
1767 		APIX_ENTER_CPU_LOCK(vecp->v_cpuid);
1768 		cpuid = vecp->v_cpuid;
1769 
1770 		DDI_INTR_IMPLDBG((CE_CONT, "apix_free_vectors: "
1771 		    "dip=0x%p inum=0x%x type=0x%x vector 0x%x (share %d)\n",
1772 		    (void *)dip, inum, type, vecp->v_vector, vecp->v_share));
1773 
1774 		/* tear down device interrupt to vector mapping */
1775 		apix_clear_dev_map(dip, inum, type);
1776 
1777 		if (vecp->v_type == APIX_TYPE_FIXED) {
1778 			if (vecp->v_share > 0) {	/* share IRQ line */
1779 				APIX_LEAVE_CPU_LOCK(cpuid);
1780 				continue;
1781 			}
1782 
1783 			/* Free apic_irq_table entry */
1784 			apix_intx_free(vecp->v_inum);
1785 		}
1786 
1787 		/* free vector */
1788 		apix_cleanup_vector(vecp);
1789 
1790 		APIX_LEAVE_CPU_LOCK(cpuid);
1791 	}
1792 
1793 	lock_clear(&apix_lock);
1794 }
1795 
1796 /*
1797  * Must be called with apix_lock held
1798  */
1799 apix_vector_t *
1800 apix_setup_io_intr(apix_vector_t *vecp)
1801 {
1802 	processorid_t bindcpu;
1803 	int ret;
1804 
1805 	ASSERT(LOCK_HELD(&apix_lock));
1806 
1807 	/*
1808 	 * If interrupts are enabled on the CPU, program the IOAPIC RDT
1809 	 * entry or MSI/X address/data to enable the interrupt.
1810 	 */
1811 	if (apix_is_cpu_enabled(vecp->v_cpuid)) {
1812 		apix_enable_vector(vecp);
1813 		return (vecp);
1814 	}
1815 
1816 	/*
1817 	 * CPU is not up or interrupts are disabled. Fall back to the
1818 	 * first available CPU.
1819 	 */
1820 	bindcpu = apic_find_cpu(APIC_CPU_INTR_ENABLE);
1821 
1822 	if (vecp->v_type == APIX_TYPE_MSI)
1823 		return (apix_grp_set_cpu(vecp, bindcpu, &ret));
1824 
1825 	return (apix_set_cpu(vecp, bindcpu, &ret));
1826 }
1827 
1828 /*
1829  * For interrupts which call add_avintr() before apic is initialized.
1830  * ioapix_setup_intr() will
1831  *   - allocate vector
1832  *   - copy over ISR
1833  */
1834 static void
1835 ioapix_setup_intr(int irqno, iflag_t *flagp)
1836 {
1837 	extern struct av_head autovect[];
1838 	apix_vector_t *vecp;
1839 	apic_irq_t *irqp;
1840 	uchar_t ioapicindex, ipin;
1841 	ulong_t iflag;
1842 	struct autovec *avp;
1843 
1844 	ioapicindex = acpi_find_ioapic(irqno);
1845 	ASSERT(ioapicindex != 0xFF);
1846 	ipin = irqno - apic_io_vectbase[ioapicindex];
1847 
1848 	mutex_enter(&airq_mutex);
1849 	irqp = apic_irq_table[irqno];
1850 
1851 	/*
1852 	 * The irq table entry shouldn't exist unless the interrupts are shared.
1853 	 * In that case, make sure it matches what we would initialize it to.
1854 	 */
1855 	if (irqp != NULL) {
1856 		ASSERT(irqp->airq_mps_intr_index == ACPI_INDEX);
1857 		ASSERT(irqp->airq_intin_no == ipin &&
1858 		    irqp->airq_ioapicindex == ioapicindex);
1859 		vecp = xv_vector(irqp->airq_cpu, irqp->airq_vector);
1860 		ASSERT(!IS_VECT_FREE(vecp));
1861 		mutex_exit(&airq_mutex);
1862 	} else {
1863 		irqp = kmem_zalloc(sizeof (apic_irq_t), KM_SLEEP);
1864 
1865 		irqp->airq_cpu = IRQ_UNINIT;
1866 		irqp->airq_origirq = (uchar_t)irqno;
1867 		irqp->airq_mps_intr_index = ACPI_INDEX;
1868 		irqp->airq_ioapicindex = ioapicindex;
1869 		irqp->airq_intin_no = ipin;
1870 		irqp->airq_iflag = *flagp;
1871 		irqp->airq_share++;
1872 
1873 		apic_irq_table[irqno] = irqp;
1874 		mutex_exit(&airq_mutex);
1875 
1876 		vecp = apix_alloc_intx(NULL, 0, irqno);
1877 	}
1878 
1879 	/* copy over autovect */
1880 	for (avp = autovect[irqno].avh_link; avp; avp = avp->av_link)
1881 		apix_insert_av(vecp, avp->av_intr_id, avp->av_vector,
1882 		    avp->av_intarg1, avp->av_intarg2, avp->av_ticksp,
1883 		    avp->av_prilevel, avp->av_dip);
1884 
1885 	/* Program I/O APIC */
1886 	iflag = intr_clear();
1887 	lock_set(&apix_lock);
1888 
1889 	(void) apix_setup_io_intr(vecp);
1890 
1891 	lock_clear(&apix_lock);
1892 	intr_restore(iflag);
1893 
1894 	APIC_VERBOSE_IOAPIC((CE_CONT, "apix: setup ioapic, irqno %x "
1895 	    "(ioapic %x, ipin %x) is bound to cpu %x, vector %x\n",
1896 	    irqno, ioapicindex, ipin, irqp->airq_cpu, irqp->airq_vector));
1897 }
1898 
1899 void
1900 ioapix_init_intr(int mask_apic)
1901 {
1902 	int ioapicindex;
1903 	int i, j;
1904 
1905 	/* mask interrupt vectors */
1906 	for (j = 0; j < apic_io_max && mask_apic; j++) {
1907 		int intin_max;
1908 
1909 		ioapicindex = j;
1910 		/* Bits 23-16 define the maximum redirection entries */
1911 		intin_max = (ioapic_read(ioapicindex, APIC_VERS_CMD) >> 16)
1912 		    & 0xff;
1913 		for (i = 0; i <= intin_max; i++)
1914 			ioapic_write(ioapicindex, APIC_RDT_CMD + 2 * i,
1915 			    AV_MASK);
1916 	}
1917 
1918 	/*
1919 	 * Hack alert: deal with ACPI SCI interrupt chicken/egg here
1920 	 */
1921 	if (apic_sci_vect > 0)
1922 		ioapix_setup_intr(apic_sci_vect, &apic_sci_flags);
1923 
1924 	/*
1925 	 * Hack alert: deal with ACPI HPET interrupt chicken/egg here.
1926 	 */
1927 	if (apic_hpet_vect > 0)
1928 		ioapix_setup_intr(apic_hpet_vect, &apic_hpet_flags);
1929 }
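/*
 * A sketch of the version-register decode in ioapix_init_intr() above: bits
 * 23:16 of the IO-APIC version register hold the index of the last
 * redirection entry (the entry count minus one), which is why the masking
 * loop runs for i = 0 .. intin_max inclusive.
 */
static int
ioapic_max_redir_index(uint32_t vers_reg)
{
	return ((vers_reg >> 16) & 0xff);
}
/* e.g. a 24-entry IO-APIC reports 23 (0x17) in bits 23:16 */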
1930