xref: /illumos-gate/usr/src/uts/sun4u/io/pci/pci_ib.c (revision 5435d801b2b4c6124787e114cb1aa677427d3d81)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * PCI Interrupt Block (RISCx) implementation
31  *	initialization
32  *	interrupt enable/disable/clear and mapping register manipulation
33  */
34 
35 #include <sys/types.h>
36 #include <sys/kmem.h>
37 #include <sys/async.h>
38 #include <sys/systm.h>		/* panicstr */
39 #include <sys/spl.h>
40 #include <sys/sunddi.h>
41 #include <sys/machsystm.h>	/* intr_dist_add */
42 #include <sys/ddi_impldefs.h>
43 #include <sys/clock.h>
44 #include <sys/cpuvar.h>
45 #include <sys/pci/pci_obj.h>
46 
47 #ifdef _STARFIRE
48 #include <sys/starfire.h>
49 #endif /* _STARFIRE */
50 
51 /*LINTLIBRARY*/
52 static uint_t ib_intr_reset(void *arg);
53 
/*
 * ib_create - allocate and initialize the interrupt block (ib) soft state
 * for one pci nexus instance, compute the virtual addresses of the chip's
 * interrupt registers, and hook the instance into panic-time interrupt
 * reset handling.  Called during nexus attach.
 */
void
ib_create(pci_t *pci_p)
{
	dev_info_t *dip = pci_p->pci_dip;
	ib_t *ib_p;
	uintptr_t a;
	int i;

	/*
	 * Allocate interrupt block state structure and link it to
	 * the pci state structure.
	 */
	ib_p = kmem_zalloc(sizeof (ib_t), KM_SLEEP);
	pci_p->pci_ib_p = ib_p;
	ib_p->ib_pci_p = pci_p;

	/* chip-specific setup; returns the base VA of the ib register block */
	a = pci_ib_setup(ib_p);

	/*
	 * Determine virtual addresses of interrupt mapping, clear and diag
	 * registers that have common offsets.
	 */
	ib_p->ib_slot_clear_intr_regs =
		a + COMMON_IB_SLOT_CLEAR_INTR_REG_OFFSET;
	ib_p->ib_intr_retry_timer_reg =
		(uint64_t *)(a + COMMON_IB_INTR_RETRY_TIMER_OFFSET);
	ib_p->ib_slot_intr_state_diag_reg =
		(uint64_t *)(a + COMMON_IB_SLOT_INTR_STATE_DIAG_REG);
	ib_p->ib_obio_intr_state_diag_reg =
		(uint64_t *)(a + COMMON_IB_OBIO_INTR_STATE_DIAG_REG);

	/* XMITS has no UPA interrupt mapping registers */
	if (CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) {
		ib_p->ib_upa_imr[0] = (volatile uint64_t *)
				(a + COMMON_IB_UPA0_INTR_MAP_REG_OFFSET);
		ib_p->ib_upa_imr[1] = (volatile uint64_t *)
				(a + COMMON_IB_UPA1_INTR_MAP_REG_OFFSET);
	}

	DEBUG2(DBG_ATTACH, dip, "ib_create: slot_imr=%x, slot_cir=%x\n",
		ib_p->ib_slot_intr_map_regs, ib_p->ib_obio_intr_map_regs);
	DEBUG2(DBG_ATTACH, dip, "ib_create: obio_imr=%x, obio_cir=%x\n",
		ib_p->ib_slot_clear_intr_regs, ib_p->ib_obio_clear_intr_regs);
	DEBUG2(DBG_ATTACH, dip, "ib_create: upa0_imr=%x, upa1_imr=%x\n",
		ib_p->ib_upa_imr[0], ib_p->ib_upa_imr[1]);
	DEBUG3(DBG_ATTACH, dip,
		"ib_create: retry_timer=%x, obio_diag=%x slot_diag=%x\n",
		ib_p->ib_intr_retry_timer_reg,
		ib_p->ib_obio_intr_state_diag_reg,
		ib_p->ib_slot_intr_state_diag_reg);

	ib_p->ib_ino_lst = (ib_ino_info_t *)NULL;
	mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL);

	/* register the UPA mapping register(s) with each interrupt proxy */
	DEBUG1(DBG_ATTACH, dip, "ib_create: numproxy=%x\n",
		pci_p->pci_numproxy);
	for (i = 1; i <= pci_p->pci_numproxy; i++) {
		set_intr_mapping_reg(pci_p->pci_id,
			(uint64_t *)ib_p->ib_upa_imr[i - 1], i);
	}

	ib_configure(ib_p);
	/* allow the panic flow to reset our interrupts to idle (sync/dump) */
	bus_func_register(BF_TYPE_RESINTR, ib_intr_reset, ib_p);
}
118 
119 void
120 ib_destroy(pci_t *pci_p)
121 {
122 	ib_t *ib_p = pci_p->pci_ib_p;
123 	dev_info_t *dip = pci_p->pci_dip;
124 
125 	DEBUG0(DBG_IB, dip, "ib_destroy\n");
126 	bus_func_unregister(BF_TYPE_RESINTR, ib_intr_reset, ib_p);
127 
128 	intr_dist_rem_weighted(ib_intr_dist_all, ib_p);
129 	mutex_destroy(&ib_p->ib_ino_lst_mutex);
130 	mutex_destroy(&ib_p->ib_intr_lock);
131 
132 	ib_free_ino_all(ib_p);
133 
134 	kmem_free(ib_p, sizeof (ib_t));
135 	pci_p->pci_ib_p = NULL;
136 }
137 
/*
 * ib_configure - program the interrupt retry timer with the tunable
 * global pci_intr_retry_intv.
 */
void
ib_configure(ib_t *ib_p)
{
	/* XXX could be different between psycho and schizo */
	*ib_p->ib_intr_retry_timer_reg = pci_intr_retry_intv;
}
144 
/*
 * Enable an interrupt via its interrupt mapping register.
 * Can only be used for psycho internal interrupts: thermal, power,
 * ue, ce, pbm.
 */
void
ib_intr_enable(pci_t *pci_p, ib_ino_t ino)
{
	ib_t *ib_p = pci_p->pci_ib_p;
	ib_mondo_t mondo = IB_INO_TO_MONDO(ib_p, ino);
	volatile uint64_t *imr_p = ib_intr_map_reg_addr(ib_p, ino);
	uint_t cpu_id;

	/*
	 * Determine the cpu for the interrupt.
	 */
	mutex_enter(&ib_p->ib_intr_lock);
	cpu_id = intr_dist_cpuid();
#ifdef _STARFIRE
	/* Starfire routes mondos through the PC asic; translate the target */
	cpu_id = pc_translate_tgtid(IB2CB(ib_p)->cb_ittrans_cookie, cpu_id,
		IB_GET_MAPREG_INO(ino));
#endif /* _STARFIRE */
	DEBUG2(DBG_IB, pci_p->pci_dip,
		"ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id);

	/* program the mapping register, then reset the ino to idle */
	*imr_p = ib_get_map_reg(mondo, cpu_id);
	IB_INO_INTR_CLEAR(ib_clear_intr_reg_addr(ib_p, ino));
	mutex_exit(&ib_p->ib_intr_lock);
}
173 
/*
 * Disable the interrupt via its interrupt mapping register.
 * Can only be used for internal interrupts: thermal, power, ue, ce, pbm.
 * If called under interrupt context, wait should be set to 0
 * (we must not busy-wait on interrupt state from interrupt context).
 */
void
ib_intr_disable(ib_t *ib_p, ib_ino_t ino, int wait)
{
	volatile uint64_t *imr_p = ib_intr_map_reg_addr(ib_p, ino);
	volatile uint64_t *state_reg_p = IB_INO_INTR_STATE_REG(ib_p, ino);
	hrtime_t start_time;

	/* disable the interrupt */
	mutex_enter(&ib_p->ib_intr_lock);
	IB_INO_INTR_OFF(imr_p);
	*imr_p;	/* flush previous write */
	mutex_exit(&ib_p->ib_intr_lock);

	if (!wait)
		goto wait_done;

	start_time = gethrtime();
	/* busy wait if there is interrupt being processed */
	while (IB_INO_INTR_PENDING(state_reg_p, ino) && !panicstr) {
		/* give up (with a warning) after pci_intrpend_timeout nsec */
		if (gethrtime() - start_time > pci_intrpend_timeout) {
			pbm_t *pbm_p = ib_p->ib_pci_p->pci_pbm_p;
			cmn_err(CE_WARN, "%s:%s: ib_intr_disable timeout %x",
				pbm_p->pbm_nameinst_str,
				pbm_p->pbm_nameaddr_str, ino);
			break;
		}
	}
wait_done:
	/* mark the ino pending so a latent mondo cannot fire */
	IB_INO_INTR_PEND(ib_clear_intr_reg_addr(ib_p, ino));
#ifdef _STARFIRE
	pc_ittrans_cleanup(IB2CB(ib_p)->cb_ittrans_cookie,
	    (volatile uint64_t *)ino);
#endif /* _STARFIRE */
}
213 
214 /* can only used for psycho internal interrupts thermal, power, ue, ce, pbm */
215 void
216 ib_nintr_clear(ib_t *ib_p, ib_ino_t ino)
217 {
218 	uint64_t *clr_reg = ib_clear_intr_reg_addr(ib_p, ino);
219 	IB_INO_INTR_CLEAR(clr_reg);
220 }
221 
/*
 * distribute PBM and UPA interrupts. ino is set to 0 by caller if we
 * are dealing with UPA interrupts (without inos).
 *
 * Retargets the mapping register imr_p to the cpu chosen by the
 * interrupt distribution framework; no-op if the interrupt is
 * disabled or (non-Starfire) already targets that cpu.
 */
void
ib_intr_dist_nintr(ib_t *ib_p, ib_ino_t ino, volatile uint64_t *imr_p)
{
	volatile uint64_t imr = *imr_p;
	uint32_t cpu_id;

	/* nothing to retarget if the interrupt is not enabled */
	if (!IB_INO_INTR_ISON(imr))
		return;

	cpu_id = intr_dist_cpuid();

#ifdef _STARFIRE
	/* translate through the PC asic; UPA interrupts (ino == 0) skip it */
	if (ino) {
		cpu_id = pc_translate_tgtid(IB2CB(ib_p)->cb_ittrans_cookie,
			cpu_id, IB_GET_MAPREG_INO(ino));
	}
#else /* _STARFIRE */
	if (ib_map_reg_get_cpu(*imr_p) == cpu_id)
		return;
#endif /* _STARFIRE */

	/* reprogram the target cpu, preserving the mondo number */
	*imr_p = ib_get_map_reg(IB_IMR2MONDO(imr), cpu_id);
	imr = *imr_p;	/* flush previous write */
}
250 
/*
 * ib_intr_dist - retarget one ino's mapping register to the cpu already
 * selected in ino_p->ino_cpuid.  Temporarily disables the interrupt and
 * busy-waits for any in-flight instance to complete before reprogramming.
 * Caller must hold ib_ino_lst_mutex.
 */
static void
ib_intr_dist(ib_t *ib_p, ib_ino_info_t *ino_p)
{
	uint32_t cpu_id = ino_p->ino_cpuid;
	ib_ino_t ino = ino_p->ino_ino;
	volatile uint64_t imr, *imr_p, *state_reg;
	hrtime_t start_time;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
	imr_p = ib_intr_map_reg_addr(ib_p, ino);
	state_reg = IB_INO_INTR_STATE_REG(ib_p, ino);

#ifdef _STARFIRE
	/*
	 * For Starfire it is a pain to check the current target for
	 * the mondo since we have to read the PC asics ITTR slot
	 * assigned to this mondo. It will be much easier to assume
	 * the current target is always different and do the target
	 * reprogram all the time.
	 */
	cpu_id = pc_translate_tgtid(IB2CB(ib_p)->cb_ittrans_cookie, cpu_id,
		IB_GET_MAPREG_INO(ino));
#else
	if (ib_map_reg_get_cpu(*imr_p) == cpu_id) /* same cpu, no reprog */
		return;
#endif /* _STARFIRE */

	/* disable interrupt, this could disrupt devices sharing our slot */
	IB_INO_INTR_OFF(imr_p);
	imr = *imr_p;	/* flush previous write */

	/* busy wait if there is interrupt being processed */
	start_time = gethrtime();
	while (IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
		/* give up (with a warning) after pci_intrpend_timeout nsec */
		if (gethrtime() - start_time > pci_intrpend_timeout) {
			pbm_t *pbm_p = ib_p->ib_pci_p->pci_pbm_p;
			cmn_err(CE_WARN, "%s:%s: ib_intr_dist(%p,%x) timeout",
				pbm_p->pbm_nameinst_str,
				pbm_p->pbm_nameaddr_str,
				imr_p, IB_INO_TO_MONDO(ib_p, ino));
			break;
		}
	}
	/* reprogram the target cpu; this also re-enables the interrupt */
	*imr_p = ib_get_map_reg(IB_IMR2MONDO(imr), cpu_id);
	imr = *imr_p;	/* flush previous write */
}
297 
/*
 * Redistribute interrupts of the specified weight. The first call has a weight
 * of weight_max, which can be used to trigger initialization for
 * redistribution. The inos with weight [weight_max, inf.) should be processed
 * on the "weight == weight_max" call.  This first call is followed by calls
 * of decreasing weights, inos of that weight should be processed.  The final
 * call specifies a weight of zero, this can be used to trigger processing of
 * stragglers.
 */
void
ib_intr_dist_all(void *arg, int32_t weight_max, int32_t weight)
{
	extern kmutex_t pciintr_ks_template_lock;
	ib_t *ib_p = (ib_t *)arg;
	pci_t *pci_p = ib_p->ib_pci_p;
	ib_ino_info_t *ino_p;
	ih_t *ih_lst;
	int32_t dweight;
	int i;

	/* final (weight 0) call: also redistribute the UPA interrupts */
	if (weight == 0) {
		mutex_enter(&ib_p->ib_intr_lock);
		if (CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) {
			for (i = 0; i < 2; i++)
				ib_intr_dist_nintr(ib_p, 0,
				    ib_p->ib_upa_imr[i]);
		}
		mutex_exit(&ib_p->ib_intr_lock);
	}

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	/* Perform special processing for first call of a redistribution. */
	if (weight == weight_max) {
		for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next) {

			/*
			 * Clear ino_established of each ino on first call.
			 * The ino_established field may be used by a pci
			 * nexus driver's pci_intr_dist_cpuid implementation
			 * when detection of established pci slot-cpu binding
			 * for multi function pci cards.
			 */
			ino_p->ino_established = 0;

			/*
			 * recompute the ino_intr_weight based on the device
			 * weight of all devinfo nodes sharing the ino (this
			 * will allow us to pick up new weights established by
			 * i_ddi_set_intr_weight()).
			 */
			ino_p->ino_intr_weight = 0;
			for (i = 0, ih_lst = ino_p->ino_ih_head;
			    i < ino_p->ino_ih_size;
			    i++, ih_lst = ih_lst->ih_next) {
				dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
				if (dweight > 0)
					ino_p->ino_intr_weight += dweight;
			}
		}
	}

	for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next) {
		uint32_t orig_cpuid;

		/*
		 * Get the weight of the ino and determine if we are going to
		 * process call.  We wait until an ib_intr_dist_all call of
		 * the proper weight occurs to support redistribution of all
		 * heavy weighted interrupts first (across all nexus driver
		 * instances).  This is done to ensure optimal
		 * INTR_WEIGHTED_DIST behavior.
		 */
		if ((weight == ino_p->ino_intr_weight) ||
		    ((weight >= weight_max) &&
		    (ino_p->ino_intr_weight >= weight_max))) {
			/* select cpuid to target and mark ino established */
			orig_cpuid = ino_p->ino_cpuid;
			if (cpu[orig_cpuid] == NULL)
				orig_cpuid = CPU->cpu_id;
			ino_p->ino_cpuid = pci_intr_dist_cpuid(ib_p, ino_p);
			ino_p->ino_established = 1;

			/* Add device weight of ino devinfos to targeted cpu. */
			for (i = 0, ih_lst = ino_p->ino_ih_head;
			    i < ino_p->ino_ih_size;
			    i++, ih_lst = ih_lst->ih_next) {
				hrtime_t ticks;

				dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
				intr_dist_cpuid_add_device_weight(
				    ino_p->ino_cpuid, ih_lst->ih_dip, dweight);

				/*
				 * different cpus may have different clock
				 * speeds. to account for this, whenever an
				 * interrupt is moved to a new CPU, we
				 * convert the accumulated ticks into nsec,
				 * based upon the clock rate of the prior
				 * CPU.
				 *
				 * It is possible that the prior CPU no longer
				 * exists. In this case, fall back to using
				 * this CPU's clock rate.
				 *
				 * Note that the value in ih_ticks has already
				 * been corrected for any power savings mode
				 * which might have been in effect.
				 *
				 * because we are updating two fields in
				 * ih_t we must lock pciintr_ks_template_lock
				 * to prevent someone from reading the kstats
				 * after we set ih_ticks to 0 and before we
				 * increment ih_nsec to compensate.
				 *
				 * we must also protect against the interrupt
				 * arriving and incrementing ih_ticks between
				 * the time we read it and when we reset it
				 * to 0. To do this we use atomic_swap.
				 */

				mutex_enter(&pciintr_ks_template_lock);
				ticks = atomic_swap_64(&ih_lst->ih_ticks, 0);
				ih_lst->ih_nsec += (uint64_t)
				    tick2ns(ticks, orig_cpuid);
				mutex_exit(&pciintr_ks_template_lock);
			}

			/* program the hardware */
			ib_intr_dist(ib_p, ino_p);
		}
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);
}
432 
433 /*
434  * Reset interrupts to IDLE.  This function is called during
435  * panic handling after redistributing interrupts; it's needed to
436  * support dumping to network devices after 'sync' from OBP.
437  *
438  * N.B.  This routine runs in a context where all other threads
439  * are permanently suspended.
440  */
441 static uint_t
442 ib_intr_reset(void *arg)
443 {
444 	ib_t *ib_p = (ib_t *)arg;
445 	ib_ino_t ino;
446 	uint64_t *clr_reg;
447 
448 	/*
449 	 * Note that we only actually care about interrupts that are
450 	 * potentially from network devices.
451 	 */
452 	for (ino = 0; ino <= ib_p->ib_max_ino; ino++) {
453 		clr_reg = ib_clear_intr_reg_addr(ib_p, ino);
454 		IB_INO_INTR_CLEAR(clr_reg);
455 	}
456 
457 	return (BF_NONE);
458 }
459 
/*
 * ib_suspend - snapshot every interrupt mapping register into software
 * state so ib_resume() can restore it after a suspend/resume cycle.
 */
void
ib_suspend(ib_t *ib_p)
{
	ib_ino_info_t *ip;
	pci_t *pci_p = ib_p->ib_pci_p;

	/* save ino_lst interrupts' mapping registers content */
	mutex_enter(&ib_p->ib_ino_lst_mutex);
	for (ip = ib_p->ib_ino_lst; ip; ip = ip->ino_next)
		ip->ino_map_reg_save = *ip->ino_map_reg;
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	/* XMITS has no UPA mapping registers to save */
	if (CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) {
		ib_p->ib_upa_imr_state[0] = *ib_p->ib_upa_imr[0];
		ib_p->ib_upa_imr_state[1] = *ib_p->ib_upa_imr[1];
	}
}
477 
/*
 * ib_resume - counterpart of ib_suspend(): reset each ino to idle and
 * restore the interrupt mapping registers saved before suspension.
 */
void
ib_resume(ib_t *ib_p)
{
	ib_ino_info_t *ip;
	pci_t *pci_p = ib_p->ib_pci_p;

	/* restore ino_lst interrupts' mapping registers content */
	mutex_enter(&ib_p->ib_ino_lst_mutex);
	for (ip = ib_p->ib_ino_lst; ip; ip = ip->ino_next) {
		IB_INO_INTR_CLEAR(ip->ino_clr_reg);	 /* set intr to idle */
		*ip->ino_map_reg = ip->ino_map_reg_save; /* restore IMR */
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);

	/* XMITS has no UPA mapping registers to restore */
	if (CHIP_TYPE(pci_p) != PCI_CHIP_XMITS) {
		*ib_p->ib_upa_imr[0] = ib_p->ib_upa_imr_state[0];
		*ib_p->ib_upa_imr[1] = ib_p->ib_upa_imr_state[1];
	}
}
497 
498 /*
499  * locate ino_info structure on ib_p->ib_ino_lst according to ino#
500  * returns NULL if not found.
501  */
502 ib_ino_info_t *
503 ib_locate_ino(ib_t *ib_p, ib_ino_t ino_num)
504 {
505 	ib_ino_info_t *ino_p = ib_p->ib_ino_lst;
506 	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
507 
508 	for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next);
509 	return (ino_p);
510 }
511 
512 #define	IB_INO_TO_SLOT(ino) (IB_IS_OBIO_INO(ino) ? 0xff : ((ino) & 0x1f) >> 2)
513 
514 ib_ino_info_t *
515 ib_new_ino(ib_t *ib_p, ib_ino_t ino_num, ih_t *ih_p)
516 {
517 	ib_ino_info_t *ino_p = kmem_alloc(sizeof (ib_ino_info_t), KM_SLEEP);
518 	ino_p->ino_ino = ino_num;
519 	ino_p->ino_slot_no = IB_INO_TO_SLOT(ino_num);
520 	ino_p->ino_ib_p = ib_p;
521 	ino_p->ino_clr_reg = ib_clear_intr_reg_addr(ib_p, ino_num);
522 	ino_p->ino_map_reg = ib_intr_map_reg_addr(ib_p, ino_num);
523 	ino_p->ino_unclaimed = 0;
524 
525 	/*
526 	 * cannot disable interrupt since we might share slot
527 	 * IB_INO_INTR_OFF(ino_p->ino_map_reg);
528 	 */
529 
530 	ih_p->ih_next = ih_p;
531 	ino_p->ino_ih_head = ih_p;
532 	ino_p->ino_ih_tail = ih_p;
533 	ino_p->ino_ih_start = ih_p;
534 	ino_p->ino_ih_size = 1;
535 
536 	ino_p->ino_next = ib_p->ib_ino_lst;
537 	ib_p->ib_ino_lst = ino_p;
538 	return (ino_p);
539 }
540 
541 /* the ino_p is retrieved by previous call to ib_locate_ino() */
542 void
543 ib_delete_ino(ib_t *ib_p, ib_ino_info_t *ino_p)
544 {
545 	ib_ino_info_t *list = ib_p->ib_ino_lst;
546 	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
547 	if (list == ino_p)
548 		ib_p->ib_ino_lst = list->ino_next;
549 	else {
550 		for (; list->ino_next != ino_p; list = list->ino_next);
551 		list->ino_next = ino_p->ino_next;
552 	}
553 }
554 
555 /* free all ino when we are detaching */
556 void
557 ib_free_ino_all(ib_t *ib_p)
558 {
559 	ib_ino_info_t *tmp = ib_p->ib_ino_lst;
560 	ib_ino_info_t *next = NULL;
561 	while (tmp) {
562 		next = tmp->ino_next;
563 		kmem_free(tmp, sizeof (ib_ino_info_t));
564 		tmp = next;
565 	}
566 }
567 
/*
 * ib_ino_add_intr - append handler ih_p to the circular handler list of
 * ino_p.  The interrupt is briefly disabled while the list is modified
 * and re-enabled on return.  Caller must hold ib_ino_lst_mutex.
 */
void
ib_ino_add_intr(pci_t *pci_p, ib_ino_info_t *ino_p, ih_t *ih_p)
{
	ib_ino_t ino = ino_p->ino_ino;
	ib_t *ib_p = ino_p->ino_ib_p;
	volatile uint64_t *state_reg = IB_INO_INTR_STATE_REG(ib_p, ino);
	hrtime_t start_time;

	ASSERT(ib_p == pci_p->pci_ib_p);
	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	/* disable interrupt, this could disrupt devices sharing our slot */
	IB_INO_INTR_OFF(ino_p->ino_map_reg);
	*ino_p->ino_map_reg;	/* flush previous write */

	/* do NOT modify the link list until after the busy wait */

	/*
	 * busy wait if there is interrupt being processed.
	 * either the pending state will be cleared by the interrupt wrapper
	 * or the interrupt will be marked as blocked indicating that it was
	 * jabbering.
	 */
	start_time = gethrtime();
	while ((ino_p->ino_unclaimed <= pci_unclaimed_intr_max) &&
		IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
		/* give up (with a warning) after pci_intrpend_timeout nsec */
		if (gethrtime() - start_time > pci_intrpend_timeout) {
			pbm_t *pbm_p = pci_p->pci_pbm_p;
			cmn_err(CE_WARN, "%s:%s: ib_ino_add_intr %x timeout",
				pbm_p->pbm_nameinst_str,
				pbm_p->pbm_nameaddr_str, ino);
			break;
		}
	}

	/* link up pci_ispec_t portion of the ppd */
	ih_p->ih_next = ino_p->ino_ih_head;
	ino_p->ino_ih_tail->ih_next = ih_p;
	ino_p->ino_ih_tail = ih_p;

	ino_p->ino_ih_start = ino_p->ino_ih_head;
	ino_p->ino_ih_size++;

	/*
	 * if the interrupt was previously blocked (left in pending state)
	 * because of jabber we need to clear the pending state in case the
	 * jabber has gone away.
	 */
	if (ino_p->ino_unclaimed > pci_unclaimed_intr_max) {
		cmn_err(CE_WARN,
		    "%s%d: ib_ino_add_intr: ino 0x%x has been unblocked",
		    ddi_driver_name(pci_p->pci_dip),
		    ddi_get_instance(pci_p->pci_dip),
		    ino_p->ino_ino);
		ino_p->ino_unclaimed = 0;
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);
	}

	/* re-enable interrupt */
	IB_INO_INTR_ON(ino_p->ino_map_reg);
	*ino_p->ino_map_reg;	/* flush previous write */
}
630 
/*
 * removes pci_ispec_t from the ino's link list.
 * uses hardware mutex to lock out interrupt threads.
 * Side effects: interrupt belongs to that ino is turned off on return.
 * if we are sharing PCI slot with other inos, the caller needs
 * to turn it back on.
 * ih_p is freed on success; on failure (ih_p not on the list) only a
 * debug message is emitted.
 */
void
ib_ino_rem_intr(pci_t *pci_p, ib_ino_info_t *ino_p, ih_t *ih_p)
{
	int i;
	ib_ino_t ino = ino_p->ino_ino;
	ih_t *ih_lst = ino_p->ino_ih_head;
	volatile uint64_t *state_reg =
		IB_INO_INTR_STATE_REG(ino_p->ino_ib_p, ino);
	hrtime_t start_time;

	ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex));
	/* disable interrupt, this could disrupt devices sharing our slot */
	IB_INO_INTR_OFF(ino_p->ino_map_reg);
	*ino_p->ino_map_reg;	/* flush previous write */

	/* do NOT modify the link list until after the busy wait */

	/*
	 * busy wait if there is interrupt being processed.
	 * either the pending state will be cleared by the interrupt wrapper
	 * or the interrupt will be marked as blocked indicating that it was
	 * jabbering.
	 */
	start_time = gethrtime();
	while ((ino_p->ino_unclaimed <= pci_unclaimed_intr_max) &&
		IB_INO_INTR_PENDING(state_reg, ino) && !panicstr) {
		/* give up (with a warning) after pci_intrpend_timeout nsec */
		if (gethrtime() - start_time > pci_intrpend_timeout) {
			pbm_t *pbm_p = pci_p->pci_pbm_p;
			cmn_err(CE_WARN, "%s:%s: ib_ino_rem_intr %x timeout",
				pbm_p->pbm_nameinst_str,
				pbm_p->pbm_nameaddr_str, ino);
			break;
		}
	}

	/* single-handler ino: no list surgery needed, ino_p will be freed */
	if (ino_p->ino_ih_size == 1) {
		if (ih_lst != ih_p)
			goto not_found;
		/* no need to set head/tail as ino_p will be freed */
		goto reset;
	}

	/*
	 * if the interrupt was previously blocked (left in pending state)
	 * because of jabber we need to clear the pending state in case the
	 * jabber has gone away.
	 */
	if (ino_p->ino_unclaimed > pci_unclaimed_intr_max) {
		cmn_err(CE_WARN,
		    "%s%d: ib_ino_rem_intr: ino 0x%x has been unblocked",
		    ddi_driver_name(pci_p->pci_dip),
		    ddi_get_instance(pci_p->pci_dip),
		    ino_p->ino_ino);
		ino_p->ino_unclaimed = 0;
		IB_INO_INTR_CLEAR(ino_p->ino_clr_reg);
	}

	/* search the (circular) link list for the predecessor of ih_p */
	for (i = 0;
		(i < ino_p->ino_ih_size) && (ih_lst->ih_next != ih_p);
		i++, ih_lst = ih_lst->ih_next);
	if (ih_lst->ih_next != ih_p)
		goto not_found;

	/* remove ih_p from the link list and maintain the head/tail */
	ih_lst->ih_next = ih_p->ih_next;
	if (ino_p->ino_ih_head == ih_p)
		ino_p->ino_ih_head = ih_p->ih_next;
	if (ino_p->ino_ih_tail == ih_p)
		ino_p->ino_ih_tail = ih_lst;
	ino_p->ino_ih_start = ino_p->ino_ih_head;
reset:
	/* release the handler's resources and the handler itself */
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
	if (ih_p->ih_ksp != NULL)
		kstat_delete(ih_p->ih_ksp);
	kmem_free(ih_p, sizeof (ih_t));
	ino_p->ino_ih_size--;

	return;
not_found:
	DEBUG2(DBG_R_INTX, ino_p->ino_ib_p->ib_pci_p->pci_dip,
		"ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);
}
722 
723 ih_t *
724 ib_ino_locate_intr(ib_ino_info_t *ino_p, dev_info_t *rdip, uint32_t inum)
725 {
726 	ih_t *ih_lst = ino_p->ino_ih_head;
727 	int i;
728 	for (i = 0; i < ino_p->ino_ih_size; i++, ih_lst = ih_lst->ih_next) {
729 		if (ih_lst->ih_dip == rdip &&
730 		    ih_lst->ih_inum == inum)
731 			return (ih_lst);
732 	}
733 	return ((ih_t *)NULL);
734 }
735 
736 ih_t *
737 ib_alloc_ih(dev_info_t *rdip, uint32_t inum,
738     uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
739     caddr_t int_handler_arg1,
740     caddr_t int_handler_arg2)
741 {
742 	ih_t *ih_p;
743 
744 	ih_p = kmem_alloc(sizeof (ih_t), KM_SLEEP);
745 	ih_p->ih_dip = rdip;
746 	ih_p->ih_inum = inum;
747 	ih_p->ih_intr_state = PCI_INTR_STATE_DISABLE;
748 	ih_p->ih_handler = int_handler;
749 	ih_p->ih_handler_arg1 = int_handler_arg1;
750 	ih_p->ih_handler_arg2 = int_handler_arg2;
751 	ih_p->ih_config_handle = NULL;
752 	ih_p->ih_nsec = 0;
753 	ih_p->ih_ticks = 0;
754 	ih_p->ih_ksp = NULL;
755 
756 	return (ih_p);
757 }
758 
/*
 * ib_update_intr_state - record the new software interrupt state
 * (new_intr_state) for the handler that rdip registered for the
 * interrupt described by hdlp.
 *
 * Returns DDI_SUCCESS when the matching ino and handler are found,
 * DDI_FAILURE when the interrupt cannot be translated or located.
 */
int
ib_update_intr_state(pci_t *pci_p, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint_t new_intr_state)
{
	ib_t		*ib_p = pci_p->pci_ib_p;
	ddi_ispec_t	*ip = (ddi_ispec_t *)hdlp->ih_private;
	ib_ino_info_t	*ino_p;
	ib_mondo_t	mondo;
	ih_t		*ih_p;
	int		ret = DDI_FAILURE;

	mutex_enter(&ib_p->ib_ino_lst_mutex);

	/* translate the interrupt spec into a mondo; 0 means no mapping */
	if ((mondo = pci_xlate_intr(pci_p->pci_dip, rdip, pci_p->pci_ib_p,
	    IB_MONDO_TO_INO(*ip->is_intr))) == 0) {
		mutex_exit(&ib_p->ib_ino_lst_mutex);
		return (ret);
	}

	/* find the ino, then rdip's handler on that ino's shared list */
	if (ino_p = ib_locate_ino(ib_p, IB_MONDO_TO_INO(mondo))) {
		if (ih_p = ib_ino_locate_intr(ino_p, rdip, hdlp->ih_inum)) {
			ih_p->ih_intr_state = new_intr_state;
			ret = DDI_SUCCESS;
		}
	}

	mutex_exit(&ib_p->ib_ino_lst_mutex);
	return (ret);
}
788