xref: /titanic_41/usr/src/uts/sun4u/io/pci/pci_intr.c (revision ea8dc4b6d2251b437950c0056bc626b311c73c27)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * PCI nexus interrupt handling:
31  *	PCI device interrupt handler wrapper
32  *	pil lookup routine
33  *	PCI device interrupt related initchild code
34  */
35 
36 #include <sys/types.h>
37 #include <sys/kmem.h>
38 #include <sys/async.h>
39 #include <sys/spl.h>
40 #include <sys/sunddi.h>
41 #include <sys/machsystm.h>	/* e_ddi_nodeid_to_dip() */
42 #include <sys/ddi_impldefs.h>
43 #include <sys/pci/pci_obj.h>
44 #include <sys/sdt.h>
45 #include <sys/clock.h>
46 
47 #ifdef _STARFIRE
48 #include <sys/starfire.h>
49 #endif /* _STARFIRE */
50 
51 /*
52  * interrupt jabber:
53  *
54  * When an interrupt line is jabbering, every time the state machine for the
55  * associated ino is idled, a new mondo will be sent and the ino will go into
56  * the pending state again. The mondo will cause a new call to
57  * pci_intr_wrapper() which normally idles the ino's state machine which would
58  * precipitate another trip round the loop.
59  * The loop can be broken by preventing the ino's state machine from being
60  * idled when an interrupt line is jabbering. See the comment at the
61  * beginning of pci_intr_wrapper() explaining how the 'interrupt jabber
62  * protection' code does this.
63  */
64 
65 /*LINTLIBRARY*/
66 
67 #ifdef NOT_DEFINED
68 /*
69  * This array is used to determine the sparc PIL at the which the
70  * handler for a given INO will execute.  This table is for onboard
71  * devices only.  A different scheme will be used for plug-in cards.
72  */
73 
74 uint_t ino_to_pil[] = {
75 
76 	/* pil */		/* ino */
77 
78 	0, 0, 0, 0,  		/* 0x00 - 0x03: bus A slot 0 int#A, B, C, D */
79 	0, 0, 0, 0,		/* 0x04 - 0x07: bus A slot 1 int#A, B, C, D */
80 	0, 0, 0, 0,  		/* 0x08 - 0x0B: unused */
81 	0, 0, 0, 0,		/* 0x0C - 0x0F: unused */
82 
83 	0, 0, 0, 0,  		/* 0x10 - 0x13: bus B slot 0 int#A, B, C, D */
84 	0, 0, 0, 0,		/* 0x14 - 0x17: bus B slot 1 int#A, B, C, D */
85 	0, 0, 0, 0,  		/* 0x18 - 0x1B: bus B slot 2 int#A, B, C, D */
86 	4, 0, 0, 0,		/* 0x1C - 0x1F: bus B slot 3 int#A, B, C, D */
87 
88 	4,			/* 0x20: SCSI */
89 	6,			/* 0x21: ethernet */
90 	3,			/* 0x22: parallel port */
91 	9,			/* 0x23: audio record */
92 	9,			/* 0x24: audio playback */
93 	14,			/* 0x25: power fail */
94 	4,			/* 0x26: 2nd SCSI */
95 	8,			/* 0x27: floppy */
96 	14,			/* 0x28: thermal warning */
97 	12,			/* 0x29: keyboard */
98 	12,			/* 0x2A: mouse */
99 	12,			/* 0x2B: serial */
100 	0,			/* 0x2C: timer/counter 0 */
101 	0,			/* 0x2D: timer/counter 1 */
102 	14,			/* 0x2E: uncorrectable ECC errors */
103 	14,			/* 0x2F: correctable ECC errors */
104 	14,			/* 0x30: PCI bus A error */
105 	14,			/* 0x31: PCI bus B error */
106 	14,			/* 0x32: power management wakeup */
107 	14,			/* 0x33 */
108 	14,			/* 0x34 */
109 	14,			/* 0x35 */
110 	14,			/* 0x36 */
111 	14,			/* 0x37 */
112 	14,			/* 0x38 */
113 	14,			/* 0x39 */
114 	14,			/* 0x3a */
115 	14,			/* 0x3b */
116 	14,			/* 0x3c */
117 	14,			/* 0x3d */
118 	14,			/* 0x3e */
119 	14,			/* 0x3f */
120 	14			/* 0x40 */
121 };
122 #endif /* NOT_DEFINED */
123 
124 
125 #define	PCI_SIMBA_VENID		0x108e	/* vendor id for simba */
126 #define	PCI_SIMBA_DEVID		0x5000	/* device id for simba */
127 
128 /*
129  * map_pcidev_cfg_reg - create mapping to pci device configuration registers
130  *			if we have a simba AND a pci to pci bridge along the
131  *			device path.
132  *			Called with corresponding mutexes held!!
133  *
134  * XXX	  XXX	XXX	The purpose of this routine is to overcome a hardware
135  *			defect in Sabre CPU and Simba bridge configuration
136  *			which does not drain DMA write data stalled in
137  *			PCI to PCI bridges (such as the DEC bridge) beyond
138  *			Simba. This routine will setup the data structures
139  *			to allow the pci_intr_wrapper to perform a manual
140  *			drain data operation before passing the control to
141  *			interrupt handlers of device drivers.
142  * return value:
143  * DDI_SUCCESS
144  * DDI_FAILURE		if unable to create mapping
145  */
146 static int
147 map_pcidev_cfg_reg(dev_info_t *dip, dev_info_t *rdip, ddi_acc_handle_t *hdl_p)
148 {
149 	dev_info_t *cdip;
150 	dev_info_t *pci_dip = NULL;
151 	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
152 	int simba_found = 0, pci_bridge_found = 0;
153 
154 	for (cdip = rdip; cdip && cdip != dip; cdip = ddi_get_parent(cdip)) {
155 		ddi_acc_handle_t config_handle;
156 		uint32_t vendor_id = ddi_getprop(DDI_DEV_T_ANY, cdip,
157 			DDI_PROP_DONTPASS, "vendor-id", 0xffff);
158 
159 		DEBUG4(DBG_A_INTX, pci_p->pci_dip,
160 			"map dev cfg reg for %s%d: @%s%d\n",
161 			ddi_driver_name(rdip), ddi_get_instance(rdip),
162 			ddi_driver_name(cdip), ddi_get_instance(cdip));
163 
164 		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
165 				"no-dma-interrupt-sync"))
166 			continue;
167 
168 		/* continue to search up-stream if not a PCI device */
169 		if (vendor_id == 0xffff)
170 			continue;
171 
172 		/* record the deepest pci device */
173 		if (!pci_dip)
174 			pci_dip = cdip;
175 
176 		/* look for simba */
177 		if (vendor_id == PCI_SIMBA_VENID) {
178 			uint32_t device_id = ddi_getprop(DDI_DEV_T_ANY,
179 			    cdip, DDI_PROP_DONTPASS, "device-id", -1);
180 			if (device_id == PCI_SIMBA_DEVID) {
181 				simba_found = 1;
182 				DEBUG0(DBG_A_INTX, pci_p->pci_dip,
183 					"\tFound simba\n");
184 				continue; /* do not check bridge if simba */
185 			}
186 		}
187 
188 		/* look for pci to pci bridge */
189 		if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS) {
190 			cmn_err(CE_WARN,
191 			    "%s%d: can't get brdg cfg space for %s%d\n",
192 				ddi_driver_name(dip), ddi_get_instance(dip),
193 				ddi_driver_name(cdip), ddi_get_instance(cdip));
194 			return (DDI_FAILURE);
195 		}
196 		if (pci_config_get8(config_handle, PCI_CONF_BASCLASS)
197 		    == PCI_CLASS_BRIDGE) {
198 			DEBUG0(DBG_A_INTX, pci_p->pci_dip,
199 				"\tFound PCI to xBus bridge\n");
200 			pci_bridge_found = 1;
201 		}
202 		pci_config_teardown(&config_handle);
203 	}
204 
205 	if (!pci_bridge_found)
206 		return (DDI_SUCCESS);
207 	if (!simba_found && (CHIP_TYPE(pci_p) < PCI_CHIP_SCHIZO))
208 		return (DDI_SUCCESS);
209 	if (pci_config_setup(pci_dip, hdl_p) != DDI_SUCCESS) {
210 		cmn_err(CE_WARN, "%s%d: can not get config space for %s%d\n",
211 			ddi_driver_name(dip), ddi_get_instance(dip),
212 			ddi_driver_name(cdip), ddi_get_instance(cdip));
213 		return (DDI_FAILURE);
214 	}
215 	return (DDI_SUCCESS);
216 }
217 
218 /*
219  * If the unclaimed interrupt count has reached the limit set by
220  * pci_unclaimed_intr_max within the time limit, then all interrupts
221  * on this ino is blocked by not idling the interrupt state machine.
222  */
223 static int
224 pci_spurintr(ib_ino_info_t *ino_p) {
225 	int i;
226 	ih_t *ih_p = ino_p->ino_ih_start;
227 	pci_t *pci_p = ino_p->ino_ib_p->ib_pci_p;
228 	char *err_fmt_str;
229 
230 	if (ino_p->ino_unclaimed > pci_unclaimed_intr_max)
231 		return (DDI_INTR_CLAIMED);
232 
233 	if (!ino_p->ino_unclaimed)
234 		ino_p->ino_spurintr_begin = ddi_get_lbolt();
235 
236 	ino_p->ino_unclaimed++;
237 
238 	if (ino_p->ino_unclaimed <= pci_unclaimed_intr_max)
239 		goto clear;
240 
241 	if (drv_hztousec(ddi_get_lbolt() - ino_p->ino_spurintr_begin)
242 	    > pci_spurintr_duration) {
243 		ino_p->ino_unclaimed = 0;
244 		goto clear;
245 	}
246 	err_fmt_str = "%s%d: ino 0x%x blocked";
247 	goto warn;
248 clear:
249 	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);  /* clear the pending state */
250 	if (!pci_spurintr_msgs) /* tomatillo errata #71 spurious mondo */
251 		return (DDI_INTR_CLAIMED);
252 
253 	err_fmt_str = "!%s%d: spurious interrupt from ino 0x%x";
254 warn:
255 	cmn_err(CE_WARN, err_fmt_str, NAMEINST(pci_p->pci_dip), ino_p->ino_ino);
256 	for (i = 0; i < ino_p->ino_ih_size; i++, ih_p = ih_p->ih_next)
257 		cmn_err(CE_CONT, "!%s-%d#%x ", NAMEINST(ih_p->ih_dip),
258 		    ih_p->ih_inum);
259 	cmn_err(CE_CONT, "!\n");
260 	return (DDI_INTR_CLAIMED);
261 }
262 
263 /*
264  * pci_intr_wrapper
265  *
266  * This routine is used as wrapper around interrupt handlers installed by child
267  * device drivers.  This routine invokes the driver interrupt handlers and
268  * examines the return codes.
269  * There is a count of unclaimed interrupts kept on a per-ino basis. If at
270  * least one handler claims the interrupt then the counter is halved and the
271  * interrupt state machine is idled. If no handler claims the interrupt then
272  * the counter is incremented by one and the state machine is idled.
273  * If the count ever reaches the limit value set by pci_unclaimed_intr_max
274  * then the interrupt state machine is not idled thus preventing any further
275  * interrupts on that ino. The state machine will only be idled again if a
276  * handler is subsequently added or removed.
277  *
278  * return value: DDI_INTR_CLAIMED if any handlers claimed the interrupt,
279  * DDI_INTR_UNCLAIMED otherwise.
280  */
281 
282 extern uint64_t intr_get_time(void);
283 
uint_t
pci_intr_wrapper(caddr_t arg)
{
	/* arg is the ino this mondo was registered for (see pci_add_intr) */
	ib_ino_info_t *ino_p = (ib_ino_info_t *)arg;
	uint_t result = 0, r;	/* result accumulates claims across handlers */
	pci_t *pci_p = ino_p->ino_ib_p->ib_pci_p;
	pbm_t *pbm_p = pci_p->pci_pbm_p;
	ih_t *ih_p = ino_p->ino_ih_start;
	int i;

	/* invoke every handler sharing this ino */
	for (i = 0; i < ino_p->ino_ih_size; i++, ih_p = ih_p->ih_next) {
		dev_info_t *dip = ih_p->ih_dip;
		uint_t (*handler)() = ih_p->ih_handler;
		caddr_t arg1 = ih_p->ih_handler_arg1;
		caddr_t arg2 = ih_p->ih_handler_arg2;
		ddi_acc_handle_t cfg_hdl = ih_p->ih_config_handle;

		/*
		 * Manual DMA drain workaround (see map_pcidev_cfg_reg):
		 * a config-space read of the device's vendor-id followed
		 * by a PBM sync forces stalled DMA write data out of any
		 * intervening PCI-PCI bridge before the driver's handler
		 * looks at its DMA buffers.
		 */
		if (pci_intr_dma_sync && cfg_hdl && pbm_p->pbm_sync_reg_pa) {
			(void) pci_config_get16(cfg_hdl, PCI_CONF_VENID);
			pci_pbm_dma_sync(pbm_p, ino_p->ino_ino);
		}

		/* skip handlers whose interrupt is administratively disabled */
		if (ih_p->ih_intr_state == PCI_INTR_STATE_DISABLE) {
			DEBUG3(DBG_INTR, pci_p->pci_dip,
			    "pci_intr_wrapper: %s%d interrupt %d is disabled\n",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    ino_p->ino_ino);

			continue;
		}

		DTRACE_PROBE4(interrupt__start, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, caddr_t, arg2);

		r = (*handler)(arg1, arg2);

		/*
		 * Account for time used by this interrupt. Protect against
		 * conflicting writes to ih_ticks from ib_intr_dist_all() by
		 * using atomic ops.
		 */

		if (ino_p->ino_pil <= LOCK_LEVEL)
			atomic_add_64(&ih_p->ih_ticks, intr_get_time());

		DTRACE_PROBE4(interrupt__complete, dev_info_t, dip,
		    void *, handler, caddr_t, arg1, int, r);

		result += r;

		/* optionally poll all handlers even after a claim */
		if (pci_check_all_handlers)
			continue;
		if (result)
			break;
	}

	/*
	 * Nobody claimed it: defer to the jabber-protection logic, which
	 * decides whether to idle the state machine or block the ino.
	 */
	if (!result)
		return (pci_spurintr(ino_p));

	/* claimed: reset the jabber count and idle the ino state machine */
	ino_p->ino_unclaimed = 0;
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);  /* clear the pending state */

	return (DDI_INTR_CLAIMED);
}
348 
349 dev_info_t *
350 get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
351 {
352 	dev_info_t *cdip = rdip;
353 
354 	for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
355 		;
356 
357 	return (cdip);
358 }
359 
/*
 * Default class-code to pil value mapping, consulted by pci_class_to_pil
 * when no "pci-class-priorities" property overrides it.  Each entry is
 * {class_code, class_mask, pil}: a device matches when its class-code
 * property equals class_code under class_mask (see pci_match_class_val).
 */
pci_class_val_t pci_default_pil [] = {
	{0x000000, 0xff0000, 0x1},	/* Class code for pre-2.0 devices */
	{0x010000, 0xff0000, 0x4},	/* Mass Storage Controller */
	{0x020000, 0xff0000, 0x6},	/* Network Controller */
	{0x030000, 0xff0000, 0x9},	/* Display Controller */
	{0x040000, 0xff0000, 0x9},	/* Multimedia Controller */
	{0x050000, 0xff0000, 0xb},	/* Memory Controller */
	{0x060000, 0xff0000, 0xb},	/* Bridge Controller */
	{0x0c0000, 0xffff00, 0x9},	/* Serial Bus, FireWire (IEEE 1394) */
	{0x0c0100, 0xffff00, 0x4},	/* Serial Bus, ACCESS.bus */
	{0x0c0200, 0xffff00, 0x4},	/* Serial Bus, SSA */
	{0x0c0300, 0xffff00, 0x9},	/* Serial Bus Universal Serial Bus */
	{0x0c0400, 0xffff00, 0x6},	/* Serial Bus, Fibre Channel */
	{0x0c0600, 0xffff00, 0x6}	/* Serial Bus, Infiniband */
};
376 
377 /*
378  * Default class to intr_weight value mapping (% of CPU).  A driver.conf
379  * entry on or above the pci node like
380  *
381  *	pci-class-intr-weights= 0x020000, 0xff0000, 30;
382  *
383  * can be used to augment or override entries in the default table below.
384  *
385  * NB: The values below give NICs preference on redistribution, and provide
386  * NICs some isolation from other interrupt sources. We need better interfaces
387  * that allow the NIC driver to identify a specific NIC instance as high
388  * bandwidth, and thus deserving of separation from other low bandwidth
389  * NICs additional isolation from other interrupt sources.
390  *
391  * NB: We treat Infiniband like a NIC.
392  */
/* entries are {class_code, class_mask, weight-%}; see pci_match_class_val */
pci_class_val_t pci_default_intr_weight [] = {
	{0x020000, 0xff0000, 35},	/* Network Controller */
	{0x010000, 0xff0000, 10},	/* Mass Storage Controller */
	{0x0c0400, 0xffff00, 10},	/* Serial Bus, Fibre Channel */
	{0x0c0600, 0xffff00, 50}	/* Serial Bus, Infiniband */
};
399 
400 static uint32_t
401 pci_match_class_val(uint32_t key, pci_class_val_t *rec_p, int nrec,
402     uint32_t default_val)
403 {
404 	int i;
405 
406 	for (i = 0; i < nrec; rec_p++, i++) {
407 		if ((rec_p->class_code & rec_p->class_mask) ==
408 		    (key & rec_p->class_mask))
409 			return (rec_p->class_val);
410 	}
411 
412 	return (default_val);
413 }
414 
415 /*
416  * Return the configuration value, based on class code and sub class code,
417  * from the specified property based or default pci_class_val_t table.
418  */
419 uint32_t
420 pci_class_to_val(dev_info_t *rdip, char *property_name, pci_class_val_t *rec_p,
421     int nrec, uint32_t default_val)
422 {
423 	int property_len;
424 	uint32_t class_code;
425 	pci_class_val_t *conf;
426 	uint32_t val = default_val;
427 
428 	/*
429 	 * Use the "class-code" property to get the base and sub class
430 	 * codes for the requesting device.
431 	 */
432 	class_code = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
433 	    DDI_PROP_DONTPASS, "class-code", -1);
434 
435 	if (class_code == -1)
436 		return (val);
437 
438 	/* look up the val from the default table */
439 	val = pci_match_class_val(class_code, rec_p, nrec, val);
440 
441 
442 	/* see if there is a more specific property specified value */
443 	if (ddi_getlongprop(DDI_DEV_T_ANY, rdip, DDI_PROP_NOTPROM,
444 	    property_name, (caddr_t)&conf, &property_len))
445 			return (val);
446 
447 	if ((property_len % sizeof (pci_class_val_t)) == 0)
448 		val = pci_match_class_val(class_code, conf,
449 		    property_len / sizeof (pci_class_val_t), val);
450 	kmem_free(conf, property_len);
451 	return (val);
452 }
453 
454 /* pci_class_to_pil: return the pil for a given PCI device. */
455 uint32_t
456 pci_class_to_pil(dev_info_t *rdip)
457 {
458 	uint32_t pil;
459 
460 	/* default pil is 0 (uninitialized) */
461 	pil = pci_class_to_val(rdip,
462 	    "pci-class-priorities", pci_default_pil,
463 	    sizeof (pci_default_pil) / sizeof (pci_class_val_t), 0);
464 
465 	/* range check the result */
466 	if (pil >= 0xf)
467 		pil = 0;
468 
469 	return (pil);
470 }
471 
472 /* pci_class_to_intr_weight: return the intr_weight for a given PCI device. */
473 int32_t
474 pci_class_to_intr_weight(dev_info_t *rdip)
475 {
476 	int32_t intr_weight;
477 
478 	/* default weight is 0% */
479 	intr_weight = pci_class_to_val(rdip,
480 	    "pci-class-intr-weights", pci_default_intr_weight,
481 	    sizeof (pci_default_intr_weight) / sizeof (pci_class_val_t), 0);
482 
483 	/* range check the result */
484 	if (intr_weight < 0)
485 		intr_weight = 0;
486 	if (intr_weight > 1000)
487 		intr_weight = 1000;
488 
489 	return (intr_weight);
490 }
491 
/*
 * Single shared template backing every "pci_intrs" kstat (kstats are
 * created KSTAT_FLAG_VIRTUAL with ks_data pointing here). pci_ks_update
 * fills it in per-handler under pciintr_ks_template_lock before each read.
 */
static struct {
	kstat_named_t pciintr_ks_name;		/* driver name + instance */
	kstat_named_t pciintr_ks_type;		/* "fixed" or "disabled" */
	kstat_named_t pciintr_ks_cpu;		/* cpu the ino targets */
	kstat_named_t pciintr_ks_pil;		/* pil of the ino */
	kstat_named_t pciintr_ks_time;		/* cumulative handler time */
	kstat_named_t pciintr_ks_ino;		/* interrupt number */
	kstat_named_t pciintr_ks_cookie;	/* system-wide mondo */
	kstat_named_t pciintr_ks_devpath;	/* device node path */
	kstat_named_t pciintr_ks_buspath;	/* nexus node path */
} pciintr_ks_template = {
	{ "name",	KSTAT_DATA_CHAR },
	{ "type",	KSTAT_DATA_CHAR },
	{ "cpu",	KSTAT_DATA_UINT64 },
	{ "pil",	KSTAT_DATA_UINT64 },
	{ "time",	KSTAT_DATA_UINT64 },
	{ "ino",	KSTAT_DATA_UINT64 },
	{ "cookie",	KSTAT_DATA_UINT64 },
	{ "devpath",	KSTAT_DATA_STRING },
	{ "buspath",	KSTAT_DATA_STRING },
};
/* monotonically increasing kstat instance number (see pci_add_intr) */
static uint32_t pciintr_ks_instance;

/* serializes updates of the shared template above */
kmutex_t pciintr_ks_template_lock;
516 
/*
 * kstat update routine for a per-handler "pci_intrs" kstat. Fills the
 * shared pciintr_ks_template from the handler (ks_private) it describes.
 * The kstat framework holds ks_lock (= pciintr_ks_template_lock) across
 * this call, which is what makes writing the shared template safe.
 * Always returns 0; the rw argument is ignored (read-only kstat).
 */
int
pci_ks_update(kstat_t *ksp, int rw)
{
	ih_t *ih_p = ksp->ks_private;
	int maxlen = sizeof (pciintr_ks_template.pciintr_ks_name.value.c);
	ib_t *ib_p = ih_p->ih_ino_p->ino_ib_p;
	pci_t *pci_p = ib_p->ib_pci_p;
	ib_ino_t ino;
	char ih_devpath[MAXPATHLEN];
	char ih_buspath[MAXPATHLEN];

	ino = ih_p->ih_ino_p->ino_ino;

	/* "name" is driver-name + instance of the interrupting device */
	(void) snprintf(pciintr_ks_template.pciintr_ks_name.value.c, maxlen,
	    "%s%d", ddi_driver_name(ih_p->ih_dip),
	    ddi_get_instance(ih_p->ih_dip));

	(void) ddi_pathname(ih_p->ih_dip, ih_devpath);
	(void) ddi_pathname(pci_p->pci_dip, ih_buspath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_devpath, ih_devpath);
	kstat_named_setstr(&pciintr_ks_template.pciintr_ks_buspath, ih_buspath);

	if (ih_p->ih_intr_state == PCI_INTR_STATE_ENABLE) {
		(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c,
		    "fixed");
		pciintr_ks_template.pciintr_ks_cpu.value.ui64 =
		    ih_p->ih_ino_p->ino_cpuid;
		pciintr_ks_template.pciintr_ks_pil.value.ui64 =
		    ih_p->ih_ino_p->ino_pil;
		/* total time = accumulated nsec + unconverted tick count */
		pciintr_ks_template.pciintr_ks_time.value.ui64 = ih_p->ih_nsec +
		    (uint64_t)tick2ns((hrtime_t)ih_p->ih_ticks,
			ih_p->ih_ino_p->ino_cpuid);
		pciintr_ks_template.pciintr_ks_ino.value.ui64 = ino;
		pciintr_ks_template.pciintr_ks_cookie.value.ui64 =
			IB_INO_TO_MONDO(ib_p, ino);
	} else {
		/* interrupt disabled: report zeros rather than stale data */
		(void) strcpy(pciintr_ks_template.pciintr_ks_type.value.c,
		    "disabled");
		pciintr_ks_template.pciintr_ks_cpu.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_pil.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_time.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_ino.value.ui64 = 0;
		pciintr_ks_template.pciintr_ks_cookie.value.ui64 = 0;
	}

	return (0);
}
564 
/*
 * pci_add_intr: nexus bus_ops entry to install an interrupt handler for a
 * child device (rdip) on this PCI nexus (dip). Translates the interrupt
 * property to a mondo, registers the handler (either directly for pulsed
 * interrupts or via pci_intr_wrapper for ino-based ones), selects a target
 * cpu, programs the mapping register, and creates a per-handler kstat.
 *
 * Returns DDI_SUCCESS, DDI_INTR_NOTFOUND (bad ino), or DDI_FAILURE.
 */
int
pci_add_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t *ib_p = pci_p->pci_ib_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	ih_t *ih_p;
	ib_ino_t ino;
	ib_ino_info_t *ino_p;		/* pulse interrupts have no ino */
	ib_mondo_t mondo;
	uint32_t cpu_id;
	int ret;
	int32_t weight;

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_A_INTX, dip, "pci_add_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (ino > ib_p->ib_max_ino) {
		DEBUG1(DBG_A_INTX, dip, "ino %x is invalid\n", ino);
		return (DDI_INTR_NOTFOUND);
	}

	/*
	 * Pulsed interrupts bypass the ino state machine entirely: the
	 * driver's handler is registered directly (no pci_intr_wrapper, no
	 * ih_t/ino_p bookkeeping, no kstat).
	 */
	if (hdlp->ih_vector & PCI_PULSE_INO) {
		volatile uint64_t *map_reg_addr;
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0)
			goto fail1;

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		if (i_ddi_add_ivintr(hdlp) != DDI_SUCCESS)
			goto fail1;

		/*
		 * Select cpu and program.
		 *
		 * Since there is no good way to always derive cpuid in
		 * pci_remove_intr for PCI_PULSE_INO (esp. for STARFIRE), we
		 * don't add (or remove) device weight for pulsed interrupt
		 * sources.
		 */
		mutex_enter(&ib_p->ib_intr_lock);
		cpu_id = intr_dist_cpuid();
		*map_reg_addr = ib_get_map_reg(mondo, cpu_id);
		mutex_exit(&ib_p->ib_intr_lock);
		*map_reg_addr;	/* flush previous write */
		goto done;
	}

	/* normal ino-based interrupt: translate property to mondo */
	if ((mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino)) == 0)
		goto fail1;

	ino = IB_MONDO_TO_INO(mondo);

	/* ino list mutex protects ino lookup/creation and kstat setup */
	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ih_p = ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);
	/* set up the DMA-drain config mapping if needed (Sabre/Simba) */
	if (map_pcidev_cfg_reg(dip, rdip, &ih_p->ih_config_handle))
		goto fail2;

	if (ino_p = ib_locate_ino(ib_p, ino)) {		/* sharing ino */
		uint32_t intr_index = hdlp->ih_inum;
		/* reject a duplicate registration of the same interrupt */
		if (ib_ino_locate_intr(ino_p, rdip, intr_index)) {
			DEBUG1(DBG_A_INTX, dip, "dup intr #%d\n", intr_index);
			goto fail3;
		}

		/* add weight to the cpu that we are already targeting */
		cpu_id = ino_p->ino_cpuid;
		weight = pci_class_to_intr_weight(rdip);
		intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

		ib_ino_add_intr(pci_p, ino_p, ih_p);
		goto ino_done;
	}

	/* first handler on this ino: create the ino and wire the wrapper */
	ino_p = ib_new_ino(ib_p, ino, ih_p);

	if (hdlp->ih_pri == 0)
		hdlp->ih_pri = pci_class_to_pil(rdip);

	hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

	/* Store this global mondo */
	ino_p->ino_mondo = hdlp->ih_vector;

	DEBUG2(DBG_A_INTX, dip, "pci_add_intr:  pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	/* register pci_intr_wrapper as the real vector handler */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)pci_intr_wrapper, (caddr_t)ino_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS)
		goto fail4;

	/* Save the pil for this ino */
	ino_p->ino_pil = hdlp->ih_pri;

	/* clear and enable interrupt */
	IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);

	/* select cpu and compute weight, saving both for sharing and removal */
	cpu_id = pci_intr_dist_cpuid(ib_p, ino_p);
	ino_p->ino_cpuid = cpu_id;
	ino_p->ino_established = 1;
	weight = pci_class_to_intr_weight(rdip);
	intr_dist_cpuid_add_device_weight(cpu_id, rdip, weight);

#ifdef _STARFIRE
	cpu_id = pc_translate_tgtid(cb_p->cb_ittrans_cookie, cpu_id,
		IB_GET_MAPREG_INO(ino));
#endif /* _STARFIRE */
	/* program the mapping register, then read it back to flush */
	*ino_p->ino_map_reg = ib_get_map_reg(mondo, cpu_id);
	*ino_p->ino_map_reg;
ino_done:
	/* per-handler kstat; the virtual kstat shares pciintr_ks_template */
	ih_p->ih_ino_p = ino_p;
	ih_p->ih_ksp = kstat_create("pci_intrs",
	    atomic_inc_32_nv(&pciintr_ks_instance), "config", "interrupts",
	    KSTAT_TYPE_NAMED,
	    sizeof (pciintr_ks_template) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (ih_p->ih_ksp != NULL) {
		/* room for the devpath/buspath KSTAT_DATA_STRING values */
		ih_p->ih_ksp->ks_data_size += MAXPATHLEN * 2;
		ih_p->ih_ksp->ks_lock = &pciintr_ks_template_lock;
		ih_p->ih_ksp->ks_data = &pciintr_ks_template;
		ih_p->ih_ksp->ks_private = ih_p;
		ih_p->ih_ksp->ks_update = pci_ks_update;
		kstat_install(ih_p->ih_ksp);
	}
	ib_ino_map_reg_share(ib_p, ino, ino_p);
	mutex_exit(&ib_p->ib_ino_lst_mutex);
done:
	DEBUG2(DBG_A_INTX, dip, "done! Interrupt 0x%x pil=%x\n",
		hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_SUCCESS);
fail4:
	/* unwind in reverse order of acquisition */
	ib_delete_ino(ib_p, ino_p);
fail3:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
fail2:
	mutex_exit(&ib_p->ib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (ih_t));
fail1:
	DEBUG2(DBG_A_INTX, dip, "Failed! Interrupt 0x%x pil=%x\n",
		hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_FAILURE);
}
726 
/*
 * pci_remove_intr: nexus bus_ops entry to remove a child's interrupt
 * handler. Undoes pci_add_intr: removes the handler from the ino's list,
 * removes its device weight, and if this was the last handler on the ino,
 * unregisters the wrapper vector and deletes the ino.
 *
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
pci_remove_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pci_t *pci_p = get_pci_soft_state(ddi_get_instance(dip));
	ib_t *ib_p = pci_p->pci_ib_p;
	cb_t *cb_p = pci_p->pci_cb_p;
	ib_ino_t ino;
	ib_mondo_t mondo;
	ib_ino_info_t *ino_p;	/* non-pulse only */
	ih_t *ih_p;		/* non-pulse only */

	ino = IB_MONDO_TO_INO(hdlp->ih_vector);

	DEBUG3(DBG_R_INTX, dip, "pci_rem_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (hdlp->ih_vector & PCI_PULSE_INO) { /* pulse interrupt */
		volatile uint64_t *map_reg_addr;

		/*
		 * No weight was added by pci_add_intr for PCI_PULSE_INO
		 * because it is difficult to determine cpuid here.
		 */
		map_reg_addr = ib_intr_map_reg_addr(ib_p, ino);
		IB_INO_INTR_RESET(map_reg_addr);	/* disable intr */
		*map_reg_addr;	/* read back to flush the write */

		mondo = pci_xlate_intr(dip, rdip, ib_p, ino);
		if (mondo == 0) {
			DEBUG1(DBG_R_INTX, dip,
				"can't get mondo for ino %x\n", ino);
			return (DDI_FAILURE);
		}

		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = pci_class_to_pil(rdip);

		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);

		DEBUG2(DBG_R_INTX, dip, "pci_rem_intr: pil=0x%x mondo=0x%x\n",
		    hdlp->ih_pri, hdlp->ih_vector);

		i_ddi_rem_ivintr(hdlp);

		DEBUG2(DBG_R_INTX, dip, "pulse success mondo=%x reg=%p\n",
			mondo, map_reg_addr);
		return (DDI_SUCCESS);
	}

	/* Translate the interrupt property */
	mondo = pci_xlate_intr(dip, rdip, pci_p->pci_ib_p, ino);
	if (mondo == 0) {
		DEBUG1(DBG_R_INTX, dip, "can't get mondo for ino %x\n", ino);
		return (DDI_FAILURE);
	}
	ino = IB_MONDO_TO_INO(mondo);

	mutex_enter(&ib_p->ib_ino_lst_mutex);
	ino_p = ib_locate_ino(ib_p, ino);
	if (!ino_p) {
		/* no ino on our list: may be a cross-trap (xintr) handler */
		int r = cb_remove_xintr(pci_p, dip, rdip, ino, mondo);
		if (r != DDI_SUCCESS)
			cmn_err(CE_WARN, "%s%d-xintr: ino %x is invalid",
			    ddi_driver_name(dip), ddi_get_instance(dip), ino);
		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (r);
	}

	/*
	 * NOTE(review): ih_p is not checked for NULL before being passed to
	 * ib_ino_rem_intr -- presumably callers only remove handlers they
	 * added, so lookup cannot fail; confirm against ib_ino_rem_intr.
	 */
	ih_p = ib_ino_locate_intr(ino_p, rdip, hdlp->ih_inum);
	ib_ino_rem_intr(pci_p, ino_p, ih_p);
	intr_dist_cpuid_rem_device_weight(ino_p->ino_cpuid, rdip);
	if (ino_p->ino_ih_size == 0) {
		/* last handler: retire the vector and the ino itself */
		IB_INO_INTR_PEND(ib_clear_intr_reg_addr(ib_p, ino));
		hdlp->ih_vector = CB_MONDO_TO_XMONDO(cb_p, mondo);
		if (hdlp->ih_pri == 0)
			hdlp->ih_pri = pci_class_to_pil(rdip);

		i_ddi_rem_ivintr(hdlp);
		/*
		 * NOTE(review): ino_p is still referenced below after
		 * ib_delete_ino -- this assumes ib_delete_ino only unlinks
		 * the ino (the memory is freed further down); verify.
		 */
		ib_delete_ino(ib_p, ino_p);
	}

	/* re-enable interrupt only if mapping register still shared */
	if (ib_ino_map_reg_unshare(ib_p, ino, ino_p)) {
		IB_INO_INTR_ON(ino_p->ino_map_reg);
		*ino_p->ino_map_reg;	/* flush the write */
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	/* free the unlinked ino outside the list mutex */
	if (ino_p->ino_ih_size == 0)
		kmem_free(ino_p, sizeof (ib_ino_info_t));

	DEBUG1(DBG_R_INTX, dip, "success! mondo=%x\n", mondo);
	return (DDI_SUCCESS);
}
821 
822 /*
823  * free the pci_inos array allocated during pci_intr_setup. the actual
824  * interrupts are torn down by their respective block destroy routines:
825  * cb_destroy, pbm_destroy, and ib_destroy.
826  */
827 void
828 pci_intr_teardown(pci_t *pci_p)
829 {
830 	kmem_free(pci_p->pci_inos, pci_p->pci_inos_len);
831 	pci_p->pci_inos = NULL;
832 	pci_p->pci_inos_len = 0;
833 }
834