xref: /titanic_51/usr/src/uts/i86pc/io/pcplusmp/apic_introp.c (revision 36d41b68ce4ecc38f01ced5fe21dddf05a5f9289)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * apic_introp.c:
28  *	Has code for Advanced DDI interrupt framework support.
29  */
30 
31 #include <sys/cpuvar.h>
32 #include <sys/psm.h>
33 #include <sys/archsystm.h>
34 #include <sys/apic.h>
35 #include <sys/sunddi.h>
36 #include <sys/ddi_impldefs.h>
37 #include <sys/mach_intr.h>
38 #include <sys/sysmacros.h>
39 #include <sys/trap.h>
40 #include <sys/pci.h>
41 #include <sys/pci_intr_lib.h>
42 
/* Per-irq autovector lists; walked by apic_get_vector_intr_info() below. */
extern struct av_head autovect[];

/*
 *	Local Function Prototypes
 */
apic_irq_t	*apic_find_irq(dev_info_t *, struct intrspec *, int);

/*
 * MSI support flag:
 * reflects whether MSI is supported at APIC level
 * it can also be patched through /etc/system
 *
 *  0 = default value - don't know and need to call apic_check_msi_support()
 *      to find out then set it accordingly
 *  1 = supported
 * -1 = not supported
 */
int	apic_support_msi = 0;

/* Multiple vector support for MSI */
#if !defined(__xpv)
int	apic_multi_msi_enable = 1;
#else
/*
 * Xen hypervisor does not seem to properly support multi-MSI
 */
int	apic_multi_msi_enable = 0;
#endif	/* __xpv */

/* Multiple vector support for MSI-X */
int	apic_msix_enable = 1;
75 /*
76  * apic_pci_msi_enable_vector:
77  *	Set the address/data fields in the MSI/X capability structure
78  *	XXX: MSI-X support
79  */
80 /* ARGSUSED */
void
apic_pci_msi_enable_vector(apic_irq_t *irq_ptr, int type, int inum, int vector,
    int count, int target_apic_id)
{
	uint64_t		msi_addr, msi_data;
	ushort_t		msi_ctrl;
	dev_info_t		*dip = irq_ptr->airq_dip;
	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(dip);
	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(dip);
#if !defined(__xpv)
	msi_regs_t		msi_regs;
#endif	/* ! __xpv */

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: dip=0x%p\n"
	    "\tdriver = %s, inum=0x%x vector=0x%x apicid=0x%x\n", (void *)dip,
	    ddi_driver_name(dip), inum, vector, target_apic_id));

	ASSERT((handle != NULL) && (cap_ptr != 0));

#if !defined(__xpv)
	/*
	 * Bare metal: have the apic_vt_ops layer (intrr = presumably
	 * interrupt remapping -- NOTE(review): confirm) allocate and map an
	 * entry for this irq; it may rewrite the address/data values, so use
	 * msi_regs contents after the calls rather than the raw inputs.
	 */
	msi_regs.mr_data = vector;
	msi_regs.mr_addr = target_apic_id;

	apic_vt_ops->apic_intrr_alloc_entry(irq_ptr);
	apic_vt_ops->apic_intrr_map_entry(irq_ptr, (void *)&msi_regs);
	apic_vt_ops->apic_intrr_record_msi(irq_ptr, &msi_regs);

	/* MSI Address */
	msi_addr = msi_regs.mr_addr;

	/* MSI Data: MSI is edge triggered according to spec */
	msi_data = msi_regs.mr_data;
#else
	/*
	 * Xen: build the address/data words directly -- fixed redirection,
	 * physical destination mode, edge-triggered delivery.
	 */
	/* MSI Address */
	msi_addr = (MSI_ADDR_HDR |
	    (target_apic_id << MSI_ADDR_DEST_SHIFT));
	msi_addr |= ((MSI_ADDR_RH_FIXED << MSI_ADDR_RH_SHIFT) |
	    (MSI_ADDR_DM_PHYSICAL << MSI_ADDR_DM_SHIFT));

	/* MSI Data: MSI is edge triggered according to spec */
	msi_data = ((MSI_DATA_TM_EDGE << MSI_DATA_TM_SHIFT) | vector);
#endif	/* ! __xpv */

	DDI_INTR_IMPLDBG((CE_CONT, "apic_pci_msi_enable_vector: addr=0x%lx "
	    "data=0x%lx\n", (long)msi_addr, (long)msi_data));

	if (type == DDI_INTR_TYPE_MSI) {
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);

		/* Set the bits to inform how many MSIs are enabled */
		msi_ctrl |= ((highbit(count) -1) << PCI_MSI_MME_SHIFT);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);

#if !defined(__xpv)
		/*
		 * Only set vector if not on hypervisor
		 */
		pci_config_put32(handle,
		    cap_ptr + PCI_MSI_ADDR_OFFSET, msi_addr);

		/* Data word location depends on 64-bit address capability. */
		if (msi_ctrl &  PCI_MSI_64BIT_MASK) {
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, msi_addr >> 32);
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, msi_data);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, msi_data);
		}

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		ddi_intr_msix_t	*msix_p = i_ddi_get_msix(dip);

		ASSERT(msix_p != NULL);

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum  * PCI_MSIX_VECTOR_SIZE);

		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), msi_data);
		ddi_put64(msix_p->msix_tbl_hdl,
		    (uint64_t *)(off + PCI_MSIX_LOWER_ADDR_OFFSET), msi_addr);
#endif	/* ! __xpv */
	}
}
168 
169 
170 #if !defined(__xpv)
171 
172 /*
173  * This function returns the no. of vectors available for the pri.
174  * dip is not used at this moment.  If we really don't need that,
175  * it will be removed.
176  */
177 /*ARGSUSED*/
178 int
179 apic_navail_vector(dev_info_t *dip, int pri)
180 {
181 	int	lowest, highest, i, navail, count;
182 
183 	DDI_INTR_IMPLDBG((CE_CONT, "apic_navail_vector: dip: %p, pri: %x\n",
184 	    (void *)dip, pri));
185 
186 	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
187 	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
188 	navail = count = 0;
189 
190 	if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
191 		lowest -= APIC_VECTOR_PER_IPL;
192 
193 	/* It has to be contiguous */
194 	for (i = lowest; i < highest; i++) {
195 		count = 0;
196 		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
197 		    (i < highest)) {
198 			if (APIC_CHECK_RESERVE_VECTORS(i))
199 				break;
200 			count++;
201 			i++;
202 		}
203 		if (count > navail)
204 			navail = count;
205 	}
206 	return (navail);
207 }
208 
209 #endif	/* ! __xpv */
210 
211 /*
212  * Finds "count" contiguous MSI vectors starting at the proper alignment
213  * at "pri".
214  * Caller needs to make sure that count has to be power of 2 and should not
215  * be < 1.
216  */
217 uchar_t
218 apic_find_multi_vectors(int pri, int count)
219 {
220 	int	lowest, highest, i, navail, start, msibits;
221 
222 	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_mult: pri: %x, count: %x\n",
223 	    pri, count));
224 
225 	highest = apic_ipltopri[pri] + APIC_VECTOR_MASK;
226 	lowest = apic_ipltopri[pri - 1] + APIC_VECTOR_PER_IPL;
227 	navail = 0;
228 
229 	if (highest < lowest) /* Both ipl and ipl - 1 map to same pri */
230 		lowest -= APIC_VECTOR_PER_IPL;
231 
232 	/*
233 	 * msibits is the no. of lower order message data bits for the
234 	 * allocated MSI vectors and is used to calculate the aligned
235 	 * starting vector
236 	 */
237 	msibits = count - 1;
238 
239 	/* It has to be contiguous */
240 	for (i = lowest; i < highest; i++) {
241 		navail = 0;
242 
243 		/*
244 		 * starting vector has to be aligned accordingly for
245 		 * multiple MSIs
246 		 */
247 		if (msibits)
248 			i = (i + msibits) & ~msibits;
249 		start = i;
250 		while ((apic_vector_to_irq[i] == APIC_RESV_IRQ) &&
251 		    (i < highest)) {
252 			if (APIC_CHECK_RESERVE_VECTORS(i))
253 				break;
254 			navail++;
255 			if (navail >= count)
256 				return (start);
257 			i++;
258 		}
259 	}
260 	return (0);
261 }
262 
263 
264 /*
265  * It finds the apic_irq_t associates with the dip, ispec and type.
266  */
267 apic_irq_t *
268 apic_find_irq(dev_info_t *dip, struct intrspec *ispec, int type)
269 {
270 	apic_irq_t	*irqp;
271 	int i;
272 
273 	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: dip=0x%p vec=0x%x "
274 	    "ipl=0x%x type=0x%x\n", (void *)dip, ispec->intrspec_vec,
275 	    ispec->intrspec_pri, type));
276 
277 	for (i = apic_min_device_irq; i <= apic_max_device_irq; i++) {
278 		if ((irqp = apic_irq_table[i]) == NULL)
279 			continue;
280 		if ((irqp->airq_dip == dip) &&
281 		    (irqp->airq_origirq == ispec->intrspec_vec) &&
282 		    (irqp->airq_ipl == ispec->intrspec_pri)) {
283 			if (type == DDI_INTR_TYPE_MSI) {
284 				if (irqp->airq_mps_intr_index == MSI_INDEX)
285 					return (irqp);
286 			} else if (type == DDI_INTR_TYPE_MSIX) {
287 				if (irqp->airq_mps_intr_index == MSIX_INDEX)
288 					return (irqp);
289 			} else
290 				return (irqp);
291 		}
292 	}
293 	DDI_INTR_IMPLDBG((CE_CONT, "apic_find_irq: return NULL\n"));
294 	return (NULL);
295 }
296 
297 
298 #if !defined(__xpv)
299 
300 /*
301  * This function will return the pending bit of the irqp.
302  * It either comes from the IRR register of the APIC or the RDT
303  * entry of the I/O APIC.
304  * For the IRR to work, it needs to be to its binding CPU
305  */
static int
apic_get_pending(apic_irq_t *irqp, int type)
{
	int			bit, index, irr, pending;
	int			intin_no;
	int			apic_ix;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_get_pending: irqp: %p, cpuid: %x "
	    "type: %x\n", (void *)irqp, irqp->airq_cpu & ~IRQ_USER_BOUND,
	    type));

	/* need to get on the bound cpu */
	mutex_enter(&cpu_lock);
	affinity_set(irqp->airq_cpu & ~IRQ_USER_BOUND);

	/*
	 * The IRR is read in 32-bit chunks: 'index' selects the register
	 * holding this vector's bit and 'bit' selects the bit within it.
	 */
	index = irqp->airq_vector / 32;
	bit = irqp->airq_vector % 32;
	irr = apic_reg_ops->apic_read(APIC_IRR_REG + index);

	/* Done with the local APIC of the bound CPU; drop affinity. */
	affinity_clear();
	mutex_exit(&cpu_lock);

	pending = (irr & (1 << bit)) ? 1 : 0;
	if (!pending && (type == DDI_INTR_TYPE_FIXED)) {
		/* check I/O APIC for fixed interrupt */
		intin_no = irqp->airq_intin_no;
		apic_ix = irqp->airq_ioapicindex;
		pending = (READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no) &
		    AV_PENDING) ? 1 : 0;
	}
	return (pending);
}
338 
339 
340 /*
341  * This function will clear the mask for the interrupt on the I/O APIC
342  */
343 static void
344 apic_clear_mask(apic_irq_t *irqp)
345 {
346 	int			intin_no;
347 	ulong_t			iflag;
348 	int32_t			rdt_entry;
349 	int 			apic_ix;
350 
351 	DDI_INTR_IMPLDBG((CE_CONT, "apic_clear_mask: irqp: %p\n",
352 	    (void *)irqp));
353 
354 	intin_no = irqp->airq_intin_no;
355 	apic_ix = irqp->airq_ioapicindex;
356 
357 	iflag = intr_clear();
358 	lock_set(&apic_ioapic_lock);
359 
360 	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no);
361 
362 	/* clear mask */
363 	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no,
364 	    ((~AV_MASK) & rdt_entry));
365 
366 	lock_clear(&apic_ioapic_lock);
367 	intr_restore(iflag);
368 }
369 
370 
371 /*
372  * This function will mask the interrupt on the I/O APIC
373  */
374 static void
375 apic_set_mask(apic_irq_t *irqp)
376 {
377 	int			intin_no;
378 	int 			apic_ix;
379 	ulong_t			iflag;
380 	int32_t			rdt_entry;
381 
382 	DDI_INTR_IMPLDBG((CE_CONT, "apic_set_mask: irqp: %p\n", (void *)irqp));
383 
384 	intin_no = irqp->airq_intin_no;
385 	apic_ix = irqp->airq_ioapicindex;
386 
387 	iflag = intr_clear();
388 
389 	lock_set(&apic_ioapic_lock);
390 
391 	rdt_entry = READ_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no);
392 
393 	/* mask it */
394 	WRITE_IOAPIC_RDT_ENTRY_LOW_DWORD(apic_ix, intin_no,
395 	    (AV_MASK | rdt_entry));
396 
397 	lock_clear(&apic_ioapic_lock);
398 	intr_restore(iflag);
399 }
400 
401 
402 void
403 apic_free_vectors(dev_info_t *dip, int inum, int count, int pri, int type)
404 {
405 	int i;
406 	apic_irq_t *irqptr;
407 	struct intrspec ispec;
408 
409 	DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: dip: %p inum: %x "
410 	    "count: %x pri: %x type: %x\n",
411 	    (void *)dip, inum, count, pri, type));
412 
413 	/* for MSI/X only */
414 	if (!DDI_INTR_IS_MSI_OR_MSIX(type))
415 		return;
416 
417 	for (i = 0; i < count; i++) {
418 		DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: inum=0x%x "
419 		    "pri=0x%x count=0x%x\n", inum, pri, count));
420 		ispec.intrspec_vec = inum + i;
421 		ispec.intrspec_pri = pri;
422 		if ((irqptr = apic_find_irq(dip, &ispec, type)) == NULL) {
423 			DDI_INTR_IMPLDBG((CE_CONT, "apic_free_vectors: "
424 			    "dip=0x%p inum=0x%x pri=0x%x apic_find_irq() "
425 			    "failed\n", (void *)dip, inum, pri));
426 			continue;
427 		}
428 		irqptr->airq_mps_intr_index = FREE_INDEX;
429 		apic_vector_to_irq[irqptr->airq_vector] = APIC_RESV_IRQ;
430 	}
431 }
432 
433 #endif	/* ! __xpv */
434 
435 /*
436  * check whether the system supports MSI
437  *
438  * If PCI-E capability is found, then this must be a PCI-E system.
439  * Since MSI is required for PCI-E system, it returns PSM_SUCCESS
440  * to indicate this system supports MSI.
441  */
442 int
443 apic_check_msi_support()
444 {
445 	dev_info_t *cdip;
446 	char dev_type[16];
447 	int dev_len;
448 
449 	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support:\n"));
450 
451 	/*
452 	 * check whether the first level children of root_node have
453 	 * PCI-E capability
454 	 */
455 	for (cdip = ddi_get_child(ddi_root_node()); cdip != NULL;
456 	    cdip = ddi_get_next_sibling(cdip)) {
457 
458 		DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: cdip: 0x%p,"
459 		    " driver: %s, binding: %s, nodename: %s\n", (void *)cdip,
460 		    ddi_driver_name(cdip), ddi_binding_name(cdip),
461 		    ddi_node_name(cdip)));
462 		dev_len = sizeof (dev_type);
463 		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
464 		    "device_type", (caddr_t)dev_type, &dev_len)
465 		    != DDI_PROP_SUCCESS)
466 			continue;
467 		if (strcmp(dev_type, "pciex") == 0)
468 			return (PSM_SUCCESS);
469 	}
470 
471 	/* MSI is not supported on this system */
472 	DDI_INTR_IMPLDBG((CE_CONT, "apic_check_msi_support: no 'pciex' "
473 	    "device_type found\n"));
474 	return (PSM_FAILURE);
475 }
476 
477 #if !defined(__xpv)
478 
479 /*
480  * apic_pci_msi_unconfigure:
481  *
482  * This and next two interfaces are copied from pci_intr_lib.c
483  * Do ensure that these two files stay in sync.
484  * These needed to be copied over here to avoid a deadlock situation on
485  * certain mp systems that use MSI interrupts.
486  *
487  * IMPORTANT regards next three interfaces:
488  * i) are called only for MSI/X interrupts.
489  * ii) called with interrupts disabled, and must not block
490  */
void
apic_pci_msi_unconfigure(dev_info_t *rdip, int type, int inum)
{
	ushort_t		msi_ctrl;
	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(rdip);

	ASSERT((handle != NULL) && (cap_ptr != 0));

	if (type == DDI_INTR_TYPE_MSI) {
		/* Clear the multiple-message-enable bits and the address. */
		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
		msi_ctrl &= (~PCI_MSI_MME_MASK);
		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
		pci_config_put32(handle, cap_ptr + PCI_MSI_ADDR_OFFSET, 0);

		/* Data/upper-address offsets depend on 64-bit capability. */
		if (msi_ctrl &  PCI_MSI_64BIT_MASK) {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_64BIT_DATA, 0);
			pci_config_put32(handle,
			    cap_ptr + PCI_MSI_ADDR_OFFSET + 4, 0);
		} else {
			pci_config_put16(handle,
			    cap_ptr + PCI_MSI_32BIT_DATA, 0);
		}

	} else if (type == DDI_INTR_TYPE_MSIX) {
		uintptr_t	off;
		uint32_t	mask;
		ddi_intr_msix_t	*msix_p = i_ddi_get_msix(rdip);

		ASSERT(msix_p != NULL);

		/* Offset into "inum"th entry in the MSI-X table & mask it */
		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;

		/* Set bit 0 of the vector-control word to mask the entry. */
		mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);

		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask | 1));

		/* Offset into the "inum"th entry in the MSI-X table */
		off = (uintptr_t)msix_p->msix_tbl_addr +
		    (inum * PCI_MSIX_VECTOR_SIZE);

		/* Reset the "data" and "addr" bits */
		ddi_put32(msix_p->msix_tbl_hdl,
		    (uint32_t *)(off + PCI_MSIX_DATA_OFFSET), 0);
		ddi_put64(msix_p->msix_tbl_hdl, (uint64_t *)off, 0);
	}
}
541 
#endif	/* ! __xpv */
543 
544 /*
545  * apic_pci_msi_enable_mode:
546  */
547 void
548 apic_pci_msi_enable_mode(dev_info_t *rdip, int type, int inum)
549 {
550 	ushort_t		msi_ctrl;
551 	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
552 	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(rdip);
553 
554 	ASSERT((handle != NULL) && (cap_ptr != 0));
555 
556 	if (type == DDI_INTR_TYPE_MSI) {
557 		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
558 		if ((msi_ctrl & PCI_MSI_ENABLE_BIT))
559 			return;
560 
561 		msi_ctrl |= PCI_MSI_ENABLE_BIT;
562 		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
563 
564 	} else if (type == DDI_INTR_TYPE_MSIX) {
565 		uintptr_t	off;
566 		uint32_t	mask;
567 		ddi_intr_msix_t	*msix_p;
568 
569 		msix_p = i_ddi_get_msix(rdip);
570 
571 		ASSERT(msix_p != NULL);
572 
573 		/* Offset into "inum"th entry in the MSI-X table & clear mask */
574 		off = (uintptr_t)msix_p->msix_tbl_addr + (inum *
575 		    PCI_MSIX_VECTOR_SIZE) + PCI_MSIX_VECTOR_CTRL_OFFSET;
576 
577 		mask = ddi_get32(msix_p->msix_tbl_hdl, (uint32_t *)off);
578 
579 		ddi_put32(msix_p->msix_tbl_hdl, (uint32_t *)off, (mask & ~1));
580 
581 		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);
582 
583 		if (!(msi_ctrl & PCI_MSIX_ENABLE_BIT)) {
584 			msi_ctrl |= PCI_MSIX_ENABLE_BIT;
585 			pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
586 			    msi_ctrl);
587 		}
588 	}
589 }
590 
591 /*
592  * apic_pci_msi_disable_mode:
593  */
594 void
595 apic_pci_msi_disable_mode(dev_info_t *rdip, int type)
596 {
597 	ushort_t		msi_ctrl;
598 	int			cap_ptr = i_ddi_get_msi_msix_cap_ptr(rdip);
599 	ddi_acc_handle_t	handle = i_ddi_get_pci_config_handle(rdip);
600 
601 	ASSERT((handle != NULL) && (cap_ptr != 0));
602 
603 	if (type == DDI_INTR_TYPE_MSI) {
604 		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
605 		if (!(msi_ctrl & PCI_MSI_ENABLE_BIT))
606 			return;
607 
608 		msi_ctrl &= ~PCI_MSI_ENABLE_BIT;	/* MSI disable */
609 		pci_config_put16(handle, cap_ptr + PCI_MSI_CTRL, msi_ctrl);
610 
611 	} else if (type == DDI_INTR_TYPE_MSIX) {
612 		msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSIX_CTRL);
613 		if (msi_ctrl & PCI_MSIX_ENABLE_BIT) {
614 			msi_ctrl &= ~PCI_MSIX_ENABLE_BIT;
615 			pci_config_put16(handle, cap_ptr + PCI_MSIX_CTRL,
616 			    msi_ctrl);
617 		}
618 	}
619 }
620 
621 #if !defined(__xpv)
622 
623 static int
624 apic_set_cpu(int irqno, int cpu, int *result)
625 {
626 	apic_irq_t *irqp;
627 	ulong_t iflag;
628 	int ret;
629 
630 	DDI_INTR_IMPLDBG((CE_CONT, "APIC_SET_CPU\n"));
631 
632 	mutex_enter(&airq_mutex);
633 	irqp = apic_irq_table[irqno];
634 	mutex_exit(&airq_mutex);
635 
636 	if (irqp == NULL) {
637 		*result = ENXIO;
638 		return (PSM_FAILURE);
639 	}
640 
641 	/* Fail if this is an MSI intr and is part of a group. */
642 	if ((irqp->airq_mps_intr_index == MSI_INDEX) &&
643 	    (irqp->airq_intin_no > 1)) {
644 		*result = ENXIO;
645 		return (PSM_FAILURE);
646 	}
647 
648 	iflag = intr_clear();
649 	lock_set(&apic_ioapic_lock);
650 
651 	ret = apic_rebind_all(irqp, cpu);
652 
653 	lock_clear(&apic_ioapic_lock);
654 	intr_restore(iflag);
655 
656 	if (ret) {
657 		*result = EIO;
658 		return (PSM_FAILURE);
659 	}
660 	/*
661 	 * keep tracking the default interrupt cpu binding
662 	 */
663 	irqp->airq_cpu = cpu;
664 
665 	*result = 0;
666 	return (PSM_SUCCESS);
667 }
668 
669 static int
670 apic_grp_set_cpu(int irqno, int new_cpu, int *result)
671 {
672 	dev_info_t *orig_dip;
673 	uint32_t orig_cpu;
674 	ulong_t iflag;
675 	apic_irq_t *irqps[PCI_MSI_MAX_INTRS];
676 	int i;
677 	int cap_ptr;
678 	int msi_mask_off;
679 	ushort_t msi_ctrl;
680 	uint32_t msi_pvm;
681 	ddi_acc_handle_t handle;
682 	int num_vectors = 0;
683 	uint32_t vector;
684 
685 	DDI_INTR_IMPLDBG((CE_CONT, "APIC_GRP_SET_CPU\n"));
686 
687 	/*
688 	 * Take mutex to insure that table doesn't change out from underneath
689 	 * us while we're playing with it.
690 	 */
691 	mutex_enter(&airq_mutex);
692 	irqps[0] = apic_irq_table[irqno];
693 	orig_cpu = irqps[0]->airq_temp_cpu;
694 	orig_dip = irqps[0]->airq_dip;
695 	num_vectors = irqps[0]->airq_intin_no;
696 	vector = irqps[0]->airq_vector;
697 
698 	/* A "group" of 1 */
699 	if (num_vectors == 1) {
700 		mutex_exit(&airq_mutex);
701 		return (apic_set_cpu(irqno, new_cpu, result));
702 	}
703 
704 	*result = ENXIO;
705 
706 	if (irqps[0]->airq_mps_intr_index != MSI_INDEX) {
707 		mutex_exit(&airq_mutex);
708 		DDI_INTR_IMPLDBG((CE_CONT, "set_grp: intr not MSI\n"));
709 		goto set_grp_intr_done;
710 	}
711 	if ((num_vectors < 1) || ((num_vectors - 1) & vector)) {
712 		mutex_exit(&airq_mutex);
713 		DDI_INTR_IMPLDBG((CE_CONT,
714 		    "set_grp: base vec not part of a grp or not aligned: "
715 		    "vec:0x%x, num_vec:0x%x\n", vector, num_vectors));
716 		goto set_grp_intr_done;
717 	}
718 	DDI_INTR_IMPLDBG((CE_CONT, "set_grp: num intrs in grp: %d\n",
719 	    num_vectors));
720 
721 	ASSERT((num_vectors + vector) < APIC_MAX_VECTOR);
722 
723 	*result = EIO;
724 
725 	/*
726 	 * All IRQ entries in the table for the given device will be not
727 	 * shared.  Since they are not shared, the dip in the table will
728 	 * be true to the device of interest.
729 	 */
730 	for (i = 1; i < num_vectors; i++) {
731 		irqps[i] = apic_irq_table[apic_vector_to_irq[vector + i]];
732 		if (irqps[i] == NULL) {
733 			mutex_exit(&airq_mutex);
734 			goto set_grp_intr_done;
735 		}
736 #ifdef DEBUG
737 		/* Sanity check: CPU and dip is the same for all entries. */
738 		if ((irqps[i]->airq_dip != orig_dip) ||
739 		    (irqps[i]->airq_temp_cpu != orig_cpu)) {
740 			mutex_exit(&airq_mutex);
741 			DDI_INTR_IMPLDBG((CE_CONT,
742 			    "set_grp: cpu or dip for vec 0x%x difft than for "
743 			    "vec 0x%x\n", vector, vector + i));
744 			DDI_INTR_IMPLDBG((CE_CONT,
745 			    "  cpu: %d vs %d, dip: 0x%p vs 0x%p\n", orig_cpu,
746 			    irqps[i]->airq_temp_cpu, (void *)orig_dip,
747 			    (void *)irqps[i]->airq_dip));
748 			goto set_grp_intr_done;
749 		}
750 #endif /* DEBUG */
751 	}
752 	mutex_exit(&airq_mutex);
753 
754 	cap_ptr = i_ddi_get_msi_msix_cap_ptr(orig_dip);
755 	handle = i_ddi_get_pci_config_handle(orig_dip);
756 	msi_ctrl = pci_config_get16(handle, cap_ptr + PCI_MSI_CTRL);
757 
758 	/* MSI Per vector masking is supported. */
759 	if (msi_ctrl & PCI_MSI_PVM_MASK) {
760 		if (msi_ctrl &  PCI_MSI_64BIT_MASK)
761 			msi_mask_off = cap_ptr + PCI_MSI_64BIT_MASKBITS;
762 		else
763 			msi_mask_off = cap_ptr + PCI_MSI_32BIT_MASK;
764 		msi_pvm = pci_config_get32(handle, msi_mask_off);
765 		pci_config_put32(handle, msi_mask_off, (uint32_t)-1);
766 		DDI_INTR_IMPLDBG((CE_CONT,
767 		    "set_grp: pvm supported.  Mask set to 0x%x\n",
768 		    pci_config_get32(handle, msi_mask_off)));
769 	}
770 
771 	iflag = intr_clear();
772 	lock_set(&apic_ioapic_lock);
773 
774 	/*
775 	 * Do the first rebind and check for errors.  Apic_rebind_all returns
776 	 * an error if the CPU is not accepting interrupts.  If the first one
777 	 * succeeds they all will.
778 	 */
779 	if (apic_rebind_all(irqps[0], new_cpu))
780 		(void) apic_rebind_all(irqps[0], orig_cpu);
781 	else {
782 		irqps[0]->airq_cpu = new_cpu;
783 
784 		for (i = 1; i < num_vectors; i++) {
785 			(void) apic_rebind_all(irqps[i], new_cpu);
786 			irqps[i]->airq_cpu = new_cpu;
787 		}
788 		*result = 0;	/* SUCCESS */
789 	}
790 
791 	lock_clear(&apic_ioapic_lock);
792 	intr_restore(iflag);
793 
794 	/* Reenable vectors if per vector masking is supported. */
795 	if (msi_ctrl & PCI_MSI_PVM_MASK) {
796 		pci_config_put32(handle, msi_mask_off, msi_pvm);
797 		DDI_INTR_IMPLDBG((CE_CONT,
798 		    "set_grp: pvm supported.  Mask restored to 0x%x\n",
799 		    pci_config_get32(handle, msi_mask_off)));
800 	}
801 
802 set_grp_intr_done:
803 	if (*result != 0)
804 		return (PSM_FAILURE);
805 
806 	return (PSM_SUCCESS);
807 }
808 
809 #else	/* __xpv */
810 
811 /*
 * We let the hypervisor deal with msi configuration
813  * so just stub this out.
814  */
815 
816 /* ARGSUSED */
void
apic_pci_msi_unconfigure(dev_info_t *rdip, int type, int inum)
{
	/* No-op under Xen: the hypervisor handles MSI configuration. */
}
821 
822 #endif	/* __xpv */
823 
824 int
825 apic_get_vector_intr_info(int vecirq, apic_get_intr_t *intr_params_p)
826 {
827 	struct autovec *av_dev;
828 	uchar_t irqno;
829 	int i;
830 	apic_irq_t *irq_p;
831 
832 	/* Sanity check the vector/irq argument. */
833 	ASSERT((vecirq >= 0) || (vecirq <= APIC_MAX_VECTOR));
834 
835 	mutex_enter(&airq_mutex);
836 
837 	/*
838 	 * Convert the vecirq arg to an irq using vector_to_irq table
839 	 * if the arg is a vector.  Pass thru if already an irq.
840 	 */
841 	if ((intr_params_p->avgi_req_flags & PSMGI_INTRBY_FLAGS) ==
842 	    PSMGI_INTRBY_VEC)
843 		irqno = apic_vector_to_irq[vecirq];
844 	else
845 		irqno = vecirq;
846 
847 	irq_p = apic_irq_table[irqno];
848 
849 	if ((irq_p == NULL) ||
850 	    (irq_p->airq_temp_cpu == IRQ_UNBOUND) ||
851 	    (irq_p->airq_temp_cpu == IRQ_UNINIT)) {
852 		mutex_exit(&airq_mutex);
853 		return (PSM_FAILURE);
854 	}
855 
856 	if (intr_params_p->avgi_req_flags & PSMGI_REQ_CPUID) {
857 
858 		/* Get the (temp) cpu from apic_irq table, indexed by irq. */
859 		intr_params_p->avgi_cpu_id = irq_p->airq_temp_cpu;
860 
861 		/* Return user bound info for intrd. */
862 		if (intr_params_p->avgi_cpu_id & IRQ_USER_BOUND) {
863 			intr_params_p->avgi_cpu_id &= ~IRQ_USER_BOUND;
864 			intr_params_p->avgi_cpu_id |= PSMGI_CPU_USER_BOUND;
865 		}
866 	}
867 
868 	if (intr_params_p->avgi_req_flags & PSMGI_REQ_VECTOR)
869 		intr_params_p->avgi_vector = irq_p->airq_vector;
870 
871 	if (intr_params_p->avgi_req_flags &
872 	    (PSMGI_REQ_NUM_DEVS | PSMGI_REQ_GET_DEVS))
873 		/* Get number of devices from apic_irq table shared field. */
874 		intr_params_p->avgi_num_devs = irq_p->airq_share;
875 
876 	if (intr_params_p->avgi_req_flags &  PSMGI_REQ_GET_DEVS) {
877 
878 		intr_params_p->avgi_req_flags  |= PSMGI_REQ_NUM_DEVS;
879 
880 		/* Some devices have NULL dip.  Don't count these. */
881 		if (intr_params_p->avgi_num_devs > 0) {
882 			for (i = 0, av_dev = autovect[irqno].avh_link;
883 			    av_dev; av_dev = av_dev->av_link)
884 				if (av_dev->av_vector && av_dev->av_dip)
885 					i++;
886 			intr_params_p->avgi_num_devs =
887 			    MIN(intr_params_p->avgi_num_devs, i);
888 		}
889 
890 		/* There are no viable dips to return. */
891 		if (intr_params_p->avgi_num_devs == 0)
892 			intr_params_p->avgi_dip_list = NULL;
893 
894 		else {	/* Return list of dips */
895 
896 			/* Allocate space in array for that number of devs. */
897 			intr_params_p->avgi_dip_list = kmem_zalloc(
898 			    intr_params_p->avgi_num_devs *
899 			    sizeof (dev_info_t *),
900 			    KM_SLEEP);
901 
902 			/*
903 			 * Loop through the device list of the autovec table
904 			 * filling in the dip array.
905 			 *
906 			 * Note that the autovect table may have some special
907 			 * entries which contain NULL dips.  These will be
908 			 * ignored.
909 			 */
910 			for (i = 0, av_dev = autovect[irqno].avh_link;
911 			    av_dev; av_dev = av_dev->av_link)
912 				if (av_dev->av_vector && av_dev->av_dip)
913 					intr_params_p->avgi_dip_list[i++] =
914 					    av_dev->av_dip;
915 		}
916 	}
917 
918 	mutex_exit(&airq_mutex);
919 
920 	return (PSM_SUCCESS);
921 }
922 
923 
924 #if !defined(__xpv)
925 
926 /*
927  * This function provides external interface to the nexus for all
928  * functionalities related to the new DDI interrupt framework.
929  *
930  * Input:
931  * dip     - pointer to the dev_info structure of the requested device
932  * hdlp    - pointer to the internal interrupt handle structure for the
933  *	     requested interrupt
934  * intr_op - opcode for this call
935  * result  - pointer to the integer that will hold the result to be
936  *	     passed back if return value is PSM_SUCCESS
937  *
938  * Output:
939  * return value is either PSM_SUCCESS or PSM_FAILURE
940  */
int
apic_intr_ops(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp,
    psm_intr_op_t intr_op, int *result)
{
	int		cap;
	int		count_vec;
	int		old_priority;
	int		new_priority;
	int		new_cpu;
	apic_irq_t	*irqp;
	struct intrspec *ispec, intr_spec;

	DDI_INTR_IMPLDBG((CE_CONT, "apic_intr_ops: dip: %p hdlp: %p "
	    "intr_op: %x\n", (void *)dip, (void *)hdlp, intr_op));

	/* Build a local intrspec from the handle for lookup operations. */
	ispec = &intr_spec;
	ispec->intrspec_pri = hdlp->ih_pri;
	ispec->intrspec_vec = hdlp->ih_inum;
	ispec->intrspec_func = hdlp->ih_cb_func;

	switch (intr_op) {
	case PSM_INTR_OP_CHECK_MSI:
		/*
		 * Check MSI/X is supported or not at APIC level and
		 * masked off the MSI/X bits in hdlp->ih_type if not
		 * supported before return.  If MSI/X is supported,
		 * leave the ih_type unchanged and return.
		 *
		 * hdlp->ih_type passed in from the nexus has all the
		 * interrupt types supported by the device.
		 */
		if (apic_support_msi == 0) {
			/*
			 * if apic_support_msi is not set, call
			 * apic_check_msi_support() to check whether msi
			 * is supported first
			 */
			if (apic_check_msi_support() == PSM_SUCCESS)
				apic_support_msi = 1;
			else
				apic_support_msi = -1;
		}
		if (apic_support_msi == 1) {
			if (apic_msix_enable)
				*result = hdlp->ih_type;
			else
				*result = hdlp->ih_type & ~DDI_INTR_TYPE_MSIX;
		} else
			*result = hdlp->ih_type & ~(DDI_INTR_TYPE_MSI |
			    DDI_INTR_TYPE_MSIX);
		break;
	case PSM_INTR_OP_ALLOC_VECTORS:
		/*
		 * ih_scratch1/ih_scratch2 carry count and behavior args
		 * to the allocators -- NOTE(review): confirm against the
		 * apic_alloc_msi{,x}_vectors() signatures.
		 */
		if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
			*result = apic_alloc_msi_vectors(dip, hdlp->ih_inum,
			    hdlp->ih_scratch1, hdlp->ih_pri,
			    (int)(uintptr_t)hdlp->ih_scratch2);
		else
			*result = apic_alloc_msix_vectors(dip, hdlp->ih_inum,
			    hdlp->ih_scratch1, hdlp->ih_pri,
			    (int)(uintptr_t)hdlp->ih_scratch2);
		break;
	case PSM_INTR_OP_FREE_VECTORS:
		apic_free_vectors(dip, hdlp->ih_inum, hdlp->ih_scratch1,
		    hdlp->ih_pri, hdlp->ih_type);
		break;
	case PSM_INTR_OP_NAVAIL_VECTORS:
		*result = apic_navail_vector(dip, hdlp->ih_pri);
		break;
	case PSM_INTR_OP_XLATE_VECTOR:
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		*result = apic_introp_xlate(dip, ispec, hdlp->ih_type);
		break;
	case PSM_INTR_OP_GET_PENDING:
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = apic_get_pending(irqp, hdlp->ih_type);
		break;
	case PSM_INTR_OP_CLEAR_MASK:
		/* Masking is only meaningful for fixed interrupts. */
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		irqp = apic_find_irq(dip, ispec, hdlp->ih_type);
		if (irqp == NULL)
			return (PSM_FAILURE);
		apic_clear_mask(irqp);
		break;
	case PSM_INTR_OP_SET_MASK:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		apic_set_mask(irqp);
		break;
	case PSM_INTR_OP_GET_CAP:
		/* Pending is always readable; maskability depends on type. */
		cap = DDI_INTR_FLAG_PENDING;
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			cap |= DDI_INTR_FLAG_MASKABLE;
		else if (hdlp->ih_type == DDI_INTR_TYPE_MSIX)
			cap |= DDI_INTR_FLAG_RETARGETABLE;
		*result = cap;
		break;
	case PSM_INTR_OP_GET_SHARED:
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (PSM_FAILURE);
		ispec = ((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp;
		if ((irqp = apic_find_irq(dip, ispec, hdlp->ih_type)) == NULL)
			return (PSM_FAILURE);
		*result = (irqp->airq_share > 1) ? 1: 0;
		break;
	case PSM_INTR_OP_SET_PRI:
		old_priority = hdlp->ih_pri;	/* save old value */
		new_priority = *(int *)result;	/* try the new value */

		/* Fixed interrupts need no vector reallocation. */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			return (PSM_SUCCESS);
		}

		/* Now allocate the vectors */
		if (hdlp->ih_type == DDI_INTR_TYPE_MSI) {
			/* SET_PRI does not support the case of multiple MSI */
			if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) > 1)
				return (PSM_FAILURE);

			count_vec = apic_alloc_msi_vectors(dip, hdlp->ih_inum,
			    1, new_priority,
			    DDI_INTR_ALLOC_STRICT);
		} else {
			count_vec = apic_alloc_msix_vectors(dip, hdlp->ih_inum,
			    1, new_priority,
			    DDI_INTR_ALLOC_STRICT);
		}

		/* Did we get new vectors? */
		if (!count_vec)
			return (PSM_FAILURE);

		/* Finally, free the previously allocated vectors */
		apic_free_vectors(dip, hdlp->ih_inum, count_vec,
		    old_priority, hdlp->ih_type);
		break;
	case PSM_INTR_OP_SET_CPU:
	case PSM_INTR_OP_GRP_SET_CPU:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a CPU value.
		 */
		new_cpu = (int)(intptr_t)hdlp->ih_private;
		if (!apic_cpu_in_range(new_cpu)) {
			DDI_INTR_IMPLDBG((CE_CONT,
			    "[grp_]set_cpu: cpu out of range: %d\n", new_cpu));
			*result = EINVAL;
			return (PSM_FAILURE);
		}
		if (hdlp->ih_vector > APIC_MAX_VECTOR) {
			DDI_INTR_IMPLDBG((CE_CONT,
			    "[grp_]set_cpu: vector out of range: %d\n",
			    hdlp->ih_vector));
			*result = EINVAL;
			return (PSM_FAILURE);
		}
		/* ih_vector is converted to an irq number from here on. */
		if (!(hdlp->ih_flags & PSMGI_INTRBY_IRQ))
			hdlp->ih_vector = apic_vector_to_irq[hdlp->ih_vector];
		if (intr_op == PSM_INTR_OP_SET_CPU) {
			if (apic_set_cpu(hdlp->ih_vector, new_cpu, result) !=
			    PSM_SUCCESS)
				return (PSM_FAILURE);
		} else {
			if (apic_grp_set_cpu(hdlp->ih_vector, new_cpu,
			    result) != PSM_SUCCESS)
				return (PSM_FAILURE);
		}
		break;
	case PSM_INTR_OP_GET_INTR:
		/*
		 * The interrupt handle given here has been allocated
		 * specifically for this command, and ih_private carries
		 * a pointer to a apic_get_intr_t.
		 */
		if (apic_get_vector_intr_info(
		    hdlp->ih_vector, hdlp->ih_private) != PSM_SUCCESS)
			return (PSM_FAILURE);
		break;
	case PSM_INTR_OP_APIC_TYPE:
		hdlp->ih_private = apic_get_apic_type();
		hdlp->ih_ver = apic_get_apic_version();
		break;
	case PSM_INTR_OP_SET_CAP:
	default:
		return (PSM_FAILURE);
	}
	return (PSM_SUCCESS);
}
1133 #endif	/* !__xpv */
1134