xref: /illumos-gate/usr/src/uts/sun4u/io/pci/pci_intr.c (revision b12258b69ac245658b0ca5ae070b3ff004186148)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
/*
 * Copyright 2019 Peter Tribble.
 */

/*
 * PCI nexus interrupt handling:
 *	PCI device interrupt handler wrapper
 *	pil lookup routine
 *	PCI device interrupt related initchild code
 */

#include <sys/types.h>
#include <sys/kmem.h>
#include <sys/async.h>
#include <sys/spl.h>
#include <sys/sunddi.h>
#include <sys/machsystm.h>	/* e_ddi_nodeid_to_dip() */
#include <sys/ddi_impldefs.h>
#include <sys/pci/pci_obj.h>
#include <sys/sdt.h>
#include <sys/clock.h>

/*
 * interrupt jabber:
 *
 * When an interrupt line is jabbering, every time the state machine for the
 * associated ino is idled, a new mondo will be sent and the ino will go into
 * the pending state again. The mondo will cause a new call to
 * pci_intr_wrapper(), which normally idles the ino's state machine and
 * thus precipitates another trip round the loop.
 * The loop can be broken by preventing the ino's state machine from being
 * idled when an interrupt line is jabbering. See the comment at the
 * beginning of pci_intr_wrapper() explaining how the 'interrupt jabber
 * protection' code does this.
 */

/*LINTLIBRARY*/

#ifdef NOT_DEFINED
/*
 * This array is used to determine the sparc PIL at which the
 * handler for a given INO will execute.  This table is for onboard
 * devices only.  A different scheme will be used for plug-in cards.
 */

uint_t ino_to_pil[] = {

	/* pil */		/* ino */

	0, 0, 0, 0,  		/* 0x00 - 0x03: bus A slot 0 int#A, B, C, D */
	0, 0, 0, 0,		/* 0x04 - 0x07: bus A slot 1 int#A, B, C, D */
	0, 0, 0, 0,  		/* 0x08 - 0x0B: unused */
	0, 0, 0, 0,		/* 0x0C - 0x0F: unused */

	0, 0, 0, 0,  		/* 0x10 - 0x13: bus B slot 0 int#A, B, C, D */
	0, 0, 0, 0,		/* 0x14 - 0x17: bus B slot 1 int#A, B, C, D */
	0, 0, 0, 0,  		/* 0x18 - 0x1B: bus B slot 2 int#A, B, C, D */
	4, 0, 0, 0,		/* 0x1C - 0x1F: bus B slot 3 int#A, B, C, D */

	4,			/* 0x20: SCSI */
	6,			/* 0x21: ethernet */
	3,			/* 0x22: parallel port */
	9,			/* 0x23: audio record */
	9,			/* 0x24: audio playback */
	14,			/* 0x25: power fail */
	4,			/* 0x26: 2nd SCSI */
	8,			/* 0x27: floppy */
	14,			/* 0x28: thermal warning */
	12,			/* 0x29: keyboard */
	12,			/* 0x2A: mouse */
	12,			/* 0x2B: serial */
	0,			/* 0x2C: timer/counter 0 */
	0,			/* 0x2D: timer/counter 1 */
	14,			/* 0x2E: uncorrectable ECC errors */
	14,			/* 0x2F: correctable ECC errors */
	14,			/* 0x30: PCI bus A error */
	14,			/* 0x31: PCI bus B error */
	14,			/* 0x32: power management wakeup */
	14,			/* 0x33 */
	14,			/* 0x34 */
	14,			/* 0x35 */
	14,			/* 0x36 */
	14,			/* 0x37 */
	14,			/* 0x38 */
	14,			/* 0x39 */
	14,			/* 0x3a */
	14,			/* 0x3b */
	14,			/* 0x3c */
	14,			/* 0x3d */
	14,			/* 0x3e */
	14,			/* 0x3f */
	14			/* 0x40 */
};
#endif /* NOT_DEFINED */


#define	PCI_SIMBA_VENID		0x108e	/* vendor id for simba */
#define	PCI_SIMBA_DEVID		0x5000	/* device id for simba */

/*
 * map_pcidev_cfg_reg - create mapping to pci device configuration registers
 *			if we have a simba AND a pci to pci bridge along the
 *			device path.
 *			Called with corresponding mutexes held!!
 *
 * XXX	  XXX	XXX	The purpose of this routine is to overcome a hardware
 *			defect in Sabre CPU and Simba bridge configuration
 *			which does not drain DMA write data stalled in
 *			PCI to PCI bridges (such as the DEC bridge) beyond
 *			Simba. This routine will set up the data structures
 *			to allow pci_intr_wrapper() to perform a manual
 *			data drain operation before passing control to the
 *			interrupt handlers of device drivers.
 * return value:
 * DDI_SUCCESS
 * DDI_FAILURE		if unable to create mapping
 */
static int
map_pcidev_cfg_reg(dev_info_t *dip, dev_info_t *rdip, ddi_acc_handle_t *hdl_p)
{
	dev_info_t *cdip;
	dev_info_t *pci_dip = NULL;
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	int simba_found = 0, pci_bridge_found = 0;

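	/*
	 * Walk from the requesting device up towards (but not including)
	 * this nexus, looking for a Simba and for any PCI to PCI bridge
	 * along the device path.
	 */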
	for (cdip = rdip; cdip && cdip != dip; cdip = ddi_get_parent(cdip)) {
		ddi_acc_handle_t config_handle;
		uint32_t vendor_id = ddi_getprop(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "vendor-id", 0xffff);

		DEBUG4(DBG_A_INTX, pci_p->pci_dip,
		    "map dev cfg reg for %s%d: @%s%d\n",
		    ddi_driver_name(rdip), ddi_get_instance(rdip),
		    ddi_driver_name(cdip), ddi_get_instance(cdip));

		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "no-dma-interrupt-sync"))
			continue;

		/* continue to search up-stream if not a PCI device */
		if (vendor_id == 0xffff)
			continue;

		/* record the deepest pci device */
		if (!pci_dip)
			pci_dip = cdip;

		/* look for simba */
		if (vendor_id == PCI_SIMBA_VENID) {
			uint32_t device_id = ddi_getprop(DDI_DEV_T_ANY,
			    cdip, DDI_PROP_DONTPASS, "device-id", -1);
			if (device_id == PCI_SIMBA_DEVID) {
				simba_found = 1;
				DEBUG0(DBG_A_INTX, pci_p->pci_dip,
				    "\tFound simba\n");
				continue; /* do not check bridge if simba */
			}
		}

		/* look for pci to pci bridge */
		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "%s%d: can't get brdg cfg space for %s%d\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ddi_driver_name(cdip), ddi_get_instance(cdip));
			return (DDI_FAILURE);
		}
		if (pci_config_get8(config_handle, PCI_CONF_BASCLASS)
		    == PCI_CLASS_BRIDGE) {
			DEBUG0(DBG_A_INTX, pci_p->pci_dip,
			    "\tFound PCI to xBus bridge\n");
			pci_bridge_found = 1;
		}
		pci_config_teardown(&config_handle);
	}

	if (!pci_bridge_found)
		return (DDI_SUCCESS);
	if (!simba_found && (CHIP_TYPE(pci_p) < PCI_CHIP_SCHIZO))
		return (DDI_SUCCESS);
	if (pci_config_setup(pci_dip, hdl_p) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: can not get config space for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip),
		    ddi_driver_name(pci_dip), ddi_get_instance(pci_dip));
		return (DDI_FAILURE);
	}
	return (DDI_SUCCESS);
}

/*
 * If the unclaimed interrupt count has reached the limit set by
 * pci_unclaimed_intr_max within the time limit, then all interrupts
 * on this ino are blocked by not idling the interrupt state machine.
 */
static int
pci_spurintr(ib_ino_pil_t *ipil_p)
{
	ib_ino_info_t	*ino_p = ipil_p->ipil_ino_p;
	ih_t		*ih_p = ipil_p->ipil_ih_start;
	pci_t		*pci_p = ino_p->ino_ib_p->ib_pci_p;
	char		*err_fmt_str;
	boolean_t	blocked = B_FALSE;
	int		i;

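	/*
	 * Once the unclaimed count has gone past the limit the ino is
	 * already blocked (its state machine is no longer idled), so
	 * just claim the interrupt.
	 */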
	if (ino_p->ino_unclaimed_intrs > pci_unclaimed_intr_max)
		return (DDI_INTR_CLAIMED);

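	/*
	 * First unclaimed interrupt of a new run: record the start time
	 * for the pci_spurintr_duration check below.
	 */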
	if (!ino_p->ino_unclaimed_intrs)
		ino_p->ino_spurintr_begin = ddi_get_lbolt();

	ino_p->ino_unclaimed_intrs++;

	if (ino_p->ino_unclaimed_intrs <= pci_unclaimed_intr_max)
		goto clear;

	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
	    > pci_spurintr_duration) {
		ino_p->ino_unclaimed_intrs = 0;
		goto clear;
	}
	err_fmt_str = "%s%d: ino 0x%x blocked";
	blocked = B_TRUE;
	goto warn;
clear:
	if (!pci_spurintr_msgs) { /* tomatillo errata #71 spurious mondo */
		/* clear the pending state */
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);
		return (DDI_INTR_CLAIMED);
	}

	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
warn:
	cmn_err(CE_WARN, err_fmt_str, NAMEINST(pci_p->pci_dip), ino_p->ino_ino);
	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next)
		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
		    ih_p->ih_inum);
	cmn_err(CE_CONT, "!\n");
	if (blocked == B_FALSE)  /* clear the pending state */
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	return (DDI_INTR_CLAIMED);
}

/*
 * pci_intr_wrapper
 *
 * This routine is used as a wrapper around the interrupt handlers installed
 * by child device drivers.  It invokes the driver interrupt handlers and
 * examines the return codes.
 * A count of unclaimed interrupts is kept on a per-ino basis. If at least
 * one handler claims the interrupt then the counter is reset to zero and the
 * interrupt state machine is idled. If no handler claims the interrupt then
 * the counter is incremented by one and the state machine is idled.
 * If the count ever reaches the limit value set by pci_unclaimed_intr_max
 * then the interrupt state machine is not idled thus preventing any further
 * interrupts on that ino. The state machine will only be idled again if a
 * handler is subsequently added or removed.
 *
 * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
 * DDI_INTR_UNCLAIMED otherwise.
 */

extern uint64_t intr_get_time(void);

uint_t
pci_intr_wrapper(caddr_t arg)
{
	ib_ino_pil_t	*ipil_p = (ib_ino_pil_t *)arg;
	ib_ino_info_t	*ino_p = ipil_p->ipil_ino_p;
	uint_t		result = 0, r = DDI_INTR_UNCLAIMED;
	pci_t		*pci_p = ino_p->ino_ib_p->ib_pci_p;
	pbm_t		*pbm_p = pci_p->pci_pbm_p;
	ih_t		*ih_p = ipil_p->ipil_ih_start;
	int		i;

	for (i = 0; i < ipil_p->ipil_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;
		ddi_acc_handle_t cfg_hdl = ih_p->ih_config_handle;

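		/*
		 * If interrupt DMA sync is enabled and a config space
		 * mapping was set up by map_pcidev_cfg_reg(), do a dummy
		 * config read followed by a PBM sync so that DMA write
		 * data stalled in any intervening PCI to PCI bridge is
		 * drained before the driver's handler runs.
		 */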
		if (pci_intr_dma_sync && cfg_hdl && pbm_p->pbm_sync_reg_pa) {
			(void) pci_config_get16(cfg_hdl, PCI_CONF_VENID);
			pci_pbm_dma_sync(pbm_p, ino_p->ino_ino);
		}

		if (ih_p->ih_intr_state == PCI_INTR_STATE_DISABLE) {
			DEBUG3(DBG_INTR, pci_p->pci_dip,
			    "pci_intr_wrapper: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (ipil_p->ipil_pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		if (pci_check_all_handlers)
			continue;
		if (result)
			break;
	}

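	/* Record that this pil level claimed the interrupt for this ino */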
	if (result)
		ino_p->ino_claimed |= (1 << ipil_p->ipil_pil);

	/* Interrupt can only be cleared after all pil levels are handled */
	if (ipil_p->ipil_pil != ino_p->ino_lopil)
		return (DDI_INTR_CLAIMED);

	if (!ino_p->ino_claimed)
		return (pci_spurintr(ipil_p));

	ino_p->ino_unclaimed_intrs = 0;
	ino_p->ino_claimed = 0;

	/* Clear the pending state */
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	return (DDI_INTR_CLAIMED);
}

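/*
 * Walk up from rdip to find the immediate child of this nexus (dip)
 * that rdip hangs off.
 */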
dev_info_t *
get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
{
	dev_info_t *cdip = rdip;

	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
		;

	return (cdip);
}

static struct {
	kstat_named_t pciintr_ks_name;
	kstat_named_t pciintr_ks_type;
	kstat_named_t pciintr_ks_cpu;
	kstat_named_t pciintr_ks_pil;
	kstat_named_t pciintr_ks_time;
	kstat_named_t pciintr_ks_ino;
	kstat_named_t pciintr_ks_cookie;
	kstat_named_t pciintr_ks_devpath;
	kstat_named_t pciintr_ks_buspath;
} pciintr_ks_template = {
	{ "name",	KSTAT_DATA_CHAR },
	{ "type",	KSTAT_DATA_CHAR },
	{ "cpu",	KSTAT_DATA_UINT64 },
	{ "pil",	KSTAT_DATA_UINT64 },
	{ "time",	KSTAT_DATA_UINT64 },
	{ "ino",	KSTAT_DATA_UINT64 },
	{ "cookie",	KSTAT_DATA_UINT64 },
	{ "devpath",	KSTAT_DATA_STRING },
	{ "buspath",	KSTAT_DATA_STRING },
};
static uint32_t pciintr_ks_instance;
static char ih_devpath[MAXPATHLEN];
static char ih_buspath[MAXPATHLEN];

kmutex_t pciintr_ks_template_lock;

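/*
 * kstat update routine shared by all pci interrupt kstats.  The single
 * pciintr_ks_template above holds the data for whichever kstat is being
 * read; it is serialized by pciintr_ks_template_lock, which pci_add_intr()
 * installs as the kstat's ks_lock.
 */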
int
pci_ks_update(kstat_t *ksp, int rw)
{
	ih_t		*ih_p = ksp->ks_private;
	int	maxlen = sizeof (pciintr_ks_template.pciintr_ks_name.value.c);
	ib_ino_pil_t	*ipil_p = ih_p->ih_ipil_p;
	ib_ino_info_t	*ino_p = ipil_p->ipil_ino_p;
	ib_t		*ib_p = ino_p->ino_ib_p;
	pci_t		*pci_p = ib_p->ib_pci_p;
	ib_ino_t	ino;

	ino = ino_p->ino_ino;

	(void) snprintf(pciintr_ks_template.pciintr_ks_name.value.c, maxlen,
	    "%s%d", ddi_driver_name(ih_p->ih_dip),
	    ddi_get_instance(ih_p->ih_dip));

	(void) ddi_pathname(ih_p->ih_dip, ih_devpath);
	(void) ddi_pathname(pci_p->pci_dip, ih_buspath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_devpath, ih_devpath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_buspath, ih_buspath);

	if (ih_p->ih_intr_state == PCI_INTR_STATE_ENABLE) {
		(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c,
		    "fixed");
		pciintr_ks_template.pciintr_ks_cpu.value.ui64 =
		    ino_p->ino_cpuid;
		pciintr_ks_template.pciintr_ks_pil.value.ui64 =
		    ipil_p->ipil_pil;
		pciintr_ks_template.pciintr_ks_time.value.ui64 = ih_p->ih_nsec +
		    (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks,
		    ino_p->ino_cpuid);
		pciintr_ks_template.pciintr_ks_ino.value.ui64 = ino;
		pciintr_ks_template.pciintr_ks_cookie.value.ui64 =
		    IB_INO_TO_MONDO(ib_p, ino);
	} else {
		(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c,
		    "disabled");
		pciintr_ks_template.pciintr_ks_cpu.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_pil.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_time.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_ino.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_cookie.value.ui64 = 0;
	}

	return (0);
}

int
pci_add_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t		*pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t		*ib_p = pci_p->pci_ib_p;
	cb_t		*cb_p = pci_p->pci_cb_p;
	ih_t		*ih_p;
	ib_ino_t	ino;
	ib_ino_info_t	*ino_p;	/* pulse interrupts have no ino */
	ib_ino_pil_t	*ipil_p, *ipil_list;
	ib_mondo_t	mondo;
	uint32_t	cpu_id;
	int		ret;
	int32_t		weight;

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_A_INTX, dip, "pci_add_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (ino > ib_p->ib_max_ino) {
		DEBUG1(DBG_A_INTX, dip, "ino %x is invalid\n", ino);
		return (DDI_INTR_NOTFOUND);
	}

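	/*
	 * Pulse interrupts bypass the ino/pil bookkeeping below; they are
	 * registered directly with the system and only need the interrupt
	 * mapping register programmed to target a cpu.
	 */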
	if (hdlp->ih_vector & PCI_PULSE_INO) {
		volatile uint64_t *map_reg_addr;
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0)
			goto fail1;

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		if (i_ddi_add_ivintr(hdlp) != DDI_SUCCESS)
			goto fail1;

		/*
		 * Select cpu and program.
		 *
		 * Since there is no good way to always derive cpuid in
		 * pci_remove_intr for PCI_PULSE_INO (esp. for STARFIRE), we
		 * don't add (or remove) device weight for pulsed interrupt
		 * sources.
		 */
		mutex_enter(&ib_p->ib_intr_lock);
		cpu_id = intr_dist_cpuid();
		*map_reg_addr = ib_get_map_reg(mondo, cpu_id);
		mutex_exit(&ib_p->ib_intr_lock);
		*map_reg_addr;	/* flush previous write */
		goto done;
	}

	if ((mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino)) == 0)
		goto fail1;

	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ih_p = ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
	if (map_pcidev_cfg_reg(dip, rdip, &ih_p->ih_config_handle))
		goto fail2;

	ino_p = ib_locate_ino(ib_p, ino);
	ipil_list = ino_p ? ino_p->ino_ipil_p : NULL;

	/* Sharing ino */
	if (ino_p && (ipil_p = ib_ino_locate_ipil(ino_p, hdlp->ih_pri))) {
		if (ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum)) {
			DEBUG1(DBG_A_INTX, dip, "dup intr #%d\n",
			    hdlp->ih_inum);
			goto fail3;
		}

		/* add weight to the cpu that we are already targeting */
		cpu_id = ino_p->ino_cpuid;
		weight = pci_class_to_intr_weight(rdip);
		intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

		ib_ino_add_intr(pci_p, ipil_p, ih_p);
		goto ino_done;
	}

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	ipil_p = ib_new_ino_pil(ib_p, ino, hdlp->ih_pri, ih_p);
	ino_p = ipil_p->ipil_ino_p;

	hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

	/* Store this global mondo */
	ino_p->ino_mondo = hdlp->ih_vector;

	DEBUG2(DBG_A_INTX, dip, "pci_add_intr:  pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)pci_intr_wrapper, (caddr_t)ipil_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail4;

	/* Save the pil for this ino */
	ipil_p->ipil_pil = hdlp->ih_pri;

	/* clear and enable interrupt */
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	/*
	 * Select cpu and compute weight, saving both for sharing and removal.
	 */
	if (ipil_list == NULL)
		ino_p->ino_cpuid = pci_intr_dist_cpuid(ib_p, ino_p);

	cpu_id = ino_p->ino_cpuid;
	ino_p->ino_established = 1;
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

	if (!ipil_list) {
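		/*
		 * First pil on this ino: program the mapping register to
		 * target the chosen cpu and read it back to flush the write.
		 */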
		*ino_p->ino_map_reg = ib_get_map_reg(mondo, cpu_id);
		*ino_p->ino_map_reg;
	}
ino_done:
	hdlp->ih_target = ino_p->ino_cpuid;
	ih_p->ih_ipil_p = ipil_p;
	ih_p->ih_ksp = kstat_create("pci_intrs",
	    atomic_inc_32_nv(&pciintr_ks_instance), "config", "interrupts",
	    KSTAT_TYPE_NAMED,
	    sizeof (pciintr_ks_template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (ih_p->ih_ksp != NULL) {
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pciintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pciintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = pci_ks_update;
		kstat_install(ih_p->ih_ksp);
	}
	ib_ino_map_reg_share(ib_p, ino, ino_p);
	mutex_exit(&ib_p->ib_ino_lst_mutex);
done:
	DEBUG2(DBG_A_INTX, dip, "done! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_SUCCESS);
fail4:
	ib_delete_ino_pil(ib_p, ipil_p);
fail3:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
fail2:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (ih_t));
fail1:
	DEBUG2(DBG_A_INTX, dip, "Failed! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_FAILURE);
}

int
pci_remove_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t		*pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t		*ib_p = pci_p->pci_ib_p;
	cb_t		*cb_p = pci_p->pci_cb_p;
	ib_ino_t	ino;
	ib_mondo_t	mondo;
	ib_ino_info_t	*ino_p;	/* non-pulse only */
	ib_ino_pil_t	*ipil_p; /* non-pulse only */
	ih_t		*ih_p;	/* non-pulse only */

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_R_INTX, dip, "pci_rem_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (hdlp->ih_vector & PCI_PULSE_INO) { /* pulse interrupt */
		volatile uint64_t *map_reg_addr;

		/*
		 * No weight was added by pci_add_intr for PCI_PULSE_INO
		 * because it is difficult to determine cpuid here.
		 */
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);
		IB_INO_INTR_RESET(map_reg_addr);	/* disable intr */
		*map_reg_addr;

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0) {
			DEBUG1(DBG_R_INTX, dip,
			    "can't get mondo for ino %x\n", ino);
			return (DDI_FAILURE);
		}

		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = pci_class_to_pil(rdip);

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		DEBUG2(DBG_R_INTX, dip, "pci_rem_intr: pil=0x%x mondo=0x%x\n",
		    hdlp->ih_pri, hdlp->ih_vector);

		i_ddi_rem_ivintr(hdlp);

		DEBUG2(DBG_R_INTX, dip, "pulse success mondo=%x reg=%p\n",
		    mondo, map_reg_addr);
		return (DDI_SUCCESS);
	}

	/* Translate the interrupt property */
	mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino);
	if (mondo == 0) {
		DEBUG1(DBG_R_INTX, dip, "can't get mondo for ino %x\n", ino);
		return (DDI_FAILURE);
	}
	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = ib_locate_ino(ib_p, ino);
	if (!ino_p) {
		int r = cb_remove_xintr(pci_p, dip, rdip, ino, mondo);
		if (r != DDI_SUCCESS)
			cmn_err(CE_WARN, "%s%d-xintr: ino %x is invalid",
			    ddi_driver_name(dip), ddi_get_instance(dip), ino);
		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (r);
	}

	ipil_p = ib_ino_locate_ipil(ino_p, hdlp->ih_pri);
	ih_p = ib_intr_locate_ih(ipil_p, rdip, hdlp->ih_inum);
	ib_ino_rem_intr(pci_p, ipil_p, ih_p);
	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);
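	/*
	 * If this was the last handler at this pil, remove the interrupt
	 * vector handler and delete the ino/pil structure for it.
	 */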
	if (ipil_p->ipil_ih_size == 0) {
		IB_INO_INTR_PEND(ib_clear_intr_reg_addr(ib_p, ino));
		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		i_ddi_rem_ivintr(hdlp);
		ib_delete_ino_pil(ib_p, ipil_p);
	}

	/* re-enable interrupt only if mapping register still shared */
	if (ib_ino_map_reg_unshare(ib_p, ino, ino_p) || ino_p->ino_ipil_size) {
		IB_INO_INTR_ON(ino_p->ino_map_reg);
		*ino_p->ino_map_reg;
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	if (ino_p->ino_ipil_size == 0)
		kmem_free(ino_p, sizeof (ib_ino_info_t));

	DEBUG1(DBG_R_INTX, dip, "success! mondo=%x\n", mondo);
	return (DDI_SUCCESS);
}

/*
 * Free the pci_inos array allocated during pci_intr_setup.  The actual
 * interrupts are torn down by their respective block destroy routines:
 * cb_destroy, pbm_destroy, and ib_destroy.
 */
void
pci_intr_teardown(pci_t *pci_p)
{
	kmem_free(pci_p->pci_inos, pci_p->pci_inos_len);
	pci_p->pci_inos = NULL;
	pci_p->pci_inos_len = 0;
}
728