xref: /titanic_50/usr/src/uts/sun4/io/px/px_ib.c (revision 30a83a24c1f8c1cdf6fad534cbcc1bfc0b884e9a)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * PX Interrupt Block implementation
31  */
32 
33 #include <sys/types.h>
34 #include <sys/kmem.h>
35 #include <sys/async.h>
36 #include <sys/systm.h>		/* panicstr */
37 #include <sys/spl.h>
38 #include <sys/sunddi.h>
39 #include <sys/machsystm.h>	/* intr_dist_add */
40 #include <sys/ddi_impldefs.h>
41 #include <sys/cpuvar.h>
42 #include "px_obj.h"
43 
44 /*LINTLIBRARY*/
45 
46 static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight);
47 static void px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino,
48     boolean_t wait_flag);
49 static uint_t px_ib_intr_reset(void *arg);
50 
51 int
52 px_ib_attach(px_t *px_p)
53 {
54 	dev_info_t	*dip = px_p->px_dip;
55 	px_ib_t		*ib_p;
56 	sysino_t	sysino;
57 	px_fault_t	*fault_p = &px_p->px_fault;
58 
59 	DBG(DBG_IB, dip, "px_ib_attach\n");
60 
61 	if (px_lib_intr_devino_to_sysino(px_p->px_dip,
62 	    px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS)
63 		return (DDI_FAILURE);
64 
65 	/*
66 	 * Allocate interrupt block state structure and link it to
67 	 * the px state structure.
68 	 */
69 	ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP);
70 	px_p->px_ib_p = ib_p;
71 	ib_p->ib_px_p = px_p;
72 	ib_p->ib_ino_lst = (px_ib_ino_info_t *)NULL;
73 
74 	mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL);
75 	mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL);
76 
77 	bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);
78 
79 	intr_dist_add_weighted(px_ib_intr_redist, ib_p);
80 
81 	/*
82 	 * Initialize PEC fault data structure
83 	 */
84 	fault_p->px_fh_dip = dip;
85 	fault_p->px_fh_sysino = sysino;
86 	fault_p->px_err_func = px_err_dmc_pec_intr;
87 	fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC];
88 
89 	return (DDI_SUCCESS);
90 }
91 
92 void
93 px_ib_detach(px_t *px_p)
94 {
95 	px_ib_t		*ib_p = px_p->px_ib_p;
96 	dev_info_t	*dip = px_p->px_dip;
97 
98 	DBG(DBG_IB, dip, "px_ib_detach\n");
99 
100 	bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);
101 	intr_dist_rem_weighted(px_ib_intr_redist, ib_p);
102 
103 	mutex_destroy(&ib_p->ib_ino_lst_mutex);
104 	mutex_destroy(&ib_p->ib_intr_lock);
105 
106 	px_ib_free_ino_all(ib_p);
107 
108 	px_p->px_ib_p = NULL;
109 	kmem_free(ib_p, sizeof (px_ib_t));
110 }
111 
112 void
113 px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino)
114 {
115 	px_ib_t		*ib_p = px_p->px_ib_p;
116 	sysino_t	sysino;
117 
118 	/*
119 	 * Determine the cpu for the interrupt
120 	 */
121 	mutex_enter(&ib_p->ib_intr_lock);
122 
123 	DBG(DBG_IB, px_p->px_dip,
124 	    "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id);
125 
126 	if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino,
127 	    &sysino) != DDI_SUCCESS) {
128 		DBG(DBG_IB, px_p->px_dip,
129 		    "px_ib_intr_enable: px_intr_devino_to_sysino() failed\n");
130 
131 		mutex_exit(&ib_p->ib_intr_lock);
132 		return;
133 	}
134 
135 	PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id);
136 
137 	mutex_exit(&ib_p->ib_intr_lock);
138 }
139 
140 /*ARGSUSED*/
141 void
142 px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait)
143 {
144 	sysino_t	sysino;
145 
146 	mutex_enter(&ib_p->ib_intr_lock);
147 
148 	DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino);
149 
150 	/* Disable the interrupt */
151 	if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino,
152 	    &sysino) != DDI_SUCCESS) {
153 		DBG(DBG_IB, ib_p->ib_px_p->px_dip,
154 		    "px_ib_intr_disable: px_intr_devino_to_sysino() failed\n");
155 
156 		mutex_exit(&ib_p->ib_intr_lock);
157 		return;
158 	}
159 
160 	PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino);
161 
162 	mutex_exit(&ib_p->ib_intr_lock);
163 }
164 
165 
166 static void
167 px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino,
168     boolean_t wait_flag)
169 {
170 	uint32_t	old_cpu_id;
171 	sysino_t	sysino;
172 	intr_valid_state_t	enabled = 0;
173 	hrtime_t	start_time;
174 	intr_state_t	intr_state;
175 	int		e = DDI_SUCCESS;
176 
177 	DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino);
178 
179 	if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) {
180 		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
181 		    "px_intr_devino_to_sysino() failed, ino 0x%x\n", ino);
182 		return;
183 	}
184 
185 	/* Skip enabling disabled interrupts */
186 	if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) {
187 		DBG(DBG_IB, dip, "px_ib_intr_dist_en: px_intr_getvalid() "
188 		    "failed, sysino 0x%x\n", sysino);
189 		return;
190 	}
191 	if (!enabled)
192 		return;
193 
194 	/* Done if redistributed onto the same cpuid */
195 	if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) {
196 		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
197 		    "px_intr_gettarget() failed\n");
198 		return;
199 	}
200 	if (cpu_id == old_cpu_id)
201 		return;
202 
203 	if (!wait_flag)
204 		goto done;
205 
206 	/* Busy wait on pending interrupts */
207 	PX_INTR_DISABLE(dip, sysino);
208 
209 	for (start_time = gethrtime(); !panicstr &&
210 	    ((e = px_lib_intr_getstate(dip, sysino, &intr_state)) ==
211 		DDI_SUCCESS) &&
212 	    (intr_state == INTR_DELIVERED_STATE); /* */) {
213 		if (gethrtime() - start_time > px_intrpend_timeout) {
214 			cmn_err(CE_WARN,
215 			    "%s%d: px_ib_intr_dist_en: sysino 0x%x(ino 0x%x) "
216 			    "from cpu id 0x%x to 0x%x timeout",
217 			    ddi_driver_name(dip), ddi_get_instance(dip),
218 			    sysino, ino, old_cpu_id, cpu_id);
219 
220 			e = DDI_FAILURE;
221 			break;
222 		}
223 	}
224 
225 	if (e != DDI_SUCCESS)
226 		DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, "
227 		    "ino 0x%x sysino 0x%x\n", ino, sysino);
228 
229 done:
230 	PX_INTR_ENABLE(dip, sysino, cpu_id);
231 }
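/*
 * Illustrative sketch (editorial addition, not part of the driver; kept out
 * of any build with #if 0): the retarget sequence above follows a common
 * pattern -- disable the source, busy-wait until an in-flight interrupt has
 * been handled or a timeout expires, then re-enable on the new target cpu.
 * The toy_*() names below are hypothetical stand-ins, not px or DDI
 * interfaces, and the clock is simulated.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

#define	TOY_IDLE	0
#define	TOY_DELIVERED	1

static uint64_t toy_now;			/* fake hrtime, in "ns" */

static uint64_t
toy_gethrtime(void)
{
	return (toy_now += 1000);
}

static int
toy_getstate(void)
{
	/* Pretend the in-flight interrupt completes after 5 us */
	return (toy_now > 5000 ? TOY_IDLE : TOY_DELIVERED);
}

static int
toy_retarget(int new_cpu, uint64_t timeout)
{
	uint64_t start = toy_gethrtime();
	int ret = 0;

	(void) printf("source disabled\n");		/* PX_INTR_DISABLE */
	while (toy_getstate() == TOY_DELIVERED) {
		if (toy_gethrtime() - start > timeout) {
			ret = -1;	/* analogous to DDI_FAILURE */
			break;
		}
	}
	/* Always re-enable, as the done: label above does */
	(void) printf("enabled on cpu %d\n", new_cpu);	/* PX_INTR_ENABLE */
	return (ret);
}
#endif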
232 
233 
234 /*
235  * Redistribute interrupts of the specified weight. The first call has a weight
236  * of weight_max, which can be used to trigger initialization for
237  * redistribution. Inos with weight [weight_max, inf.) should be processed
238  * on the "weight == weight_max" call.  This first call is followed by calls
239  * of decreasing weight, on which inos of that weight should be processed.
240  * The final call specifies a weight of zero, which can be used to trigger
241  * processing of stragglers.
242  */
243 static void
244 px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight)
245 {
246 	extern kmutex_t pxintr_ks_template_lock;
247 	px_ib_t		*ib_p = (px_ib_t *)arg;
248 	px_t		*px_p = ib_p->ib_px_p;
249 	dev_info_t	*dip = px_p->px_dip;
250 	px_ib_ino_info_t *ino_p;
251 	px_ih_t		*ih_lst;
252 	int32_t		dweight = 0;
253 	int		i;
254 
255 	/* Redistribute internal interrupts */
256 	if (weight == 0) {
257 		devino_t	ino_pec = px_p->px_inos[PX_INTR_PEC];
258 
259 		mutex_enter(&ib_p->ib_intr_lock);
260 		px_ib_intr_dist_en(dip, intr_dist_cpuid(), ino_pec, B_FALSE);
261 		mutex_exit(&ib_p->ib_intr_lock);
262 	}
263 
264 	/* Redistribute device interrupts */
265 	mutex_enter(&ib_p->ib_ino_lst_mutex);
266 
267 	for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next) {
268 		uint32_t orig_cpuid;
269 
270 		/*
271 		 * On the first call (marked by weight == weight_max),
272 		 * recompute the sum of the interrupt weights of the
273 		 * devices that share this ino.
274 		 */
275 		if (weight == weight_max) {
276 			ino_p->ino_intr_weight = 0;
277 			for (i = 0, ih_lst = ino_p->ino_ih_head;
278 			    i < ino_p->ino_ih_size;
279 			    i++, ih_lst = ih_lst->ih_next) {
280 				dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
281 				if (dweight > 0)
282 					ino_p->ino_intr_weight += dweight;
283 			}
284 		}
285 
286 		/*
287 		 * As part of redistributing weighted interrupts across cpus,
288 		 * the nexus retargets device interrupts and updates each
289 		 * cpu's accumulated weight.  The intent is that the most
290 		 * lightly weighted cpu takes the next interrupt and gains
291 		 * weight; a device that demands more attention thus keeps
292 		 * more of its cpu to itself by making that cpu "heavy".
293 		 */
294 		if ((weight == ino_p->ino_intr_weight) ||
295 		    ((weight >= weight_max) &&
296 		    (ino_p->ino_intr_weight >= weight_max))) {
297 			orig_cpuid = ino_p->ino_cpuid;
298 			if (cpu[orig_cpuid] == NULL)
299 				orig_cpuid = CPU->cpu_id;
300 
301 			/* select cpuid to target and mark ino established */
302 			ino_p->ino_cpuid = intr_dist_cpuid();
303 
304 			/* Add device weight to targeted cpu. */
305 			for (i = 0, ih_lst = ino_p->ino_ih_head;
306 			    i < ino_p->ino_ih_size;
307 			    i++, ih_lst = ih_lst->ih_next) {
308 				hrtime_t ticks;
309 
310 				dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
311 				intr_dist_cpuid_add_device_weight(
312 				    ino_p->ino_cpuid, ih_lst->ih_dip, dweight);
313 
314 				/*
315 				 * different cpus may have different clock
316 				 * speeds. to account for this, whenever an
317 				 * interrupt is moved to a new CPU, we
318 				 * convert the accumulated ticks into nsec,
319 				 * based upon the clock rate of the prior
320 				 * CPU.
321 				 *
322 				 * It is possible that the prior CPU no longer
323 				 * exists. In this case, fall back to using
324 				 * this CPU's clock rate.
325 				 *
326 				 * Note that the value in ih_ticks has already
327 				 * been corrected for any power savings mode
328 				 * which might have been in effect.
329 				 *
330 				 * because we are updating two fields in
331 				 * ih_t we must lock pxintr_ks_template_lock
332 				 * to prevent someone from reading the kstats
333 				 * after we set ih_ticks to 0 and before we
334 				 * increment ih_nsec to compensate.
335 				 *
336 				 * we must also protect against the interrupt
337 				 * arriving and incrementing ih_ticks between
338 				 * the time we read it and when we reset it
339 				 * to 0. To do this we use atomic_swap.
340 				 */
341 
342 				mutex_enter(&pxintr_ks_template_lock);
343 				ticks = atomic_swap_64(&ih_lst->ih_ticks, 0);
344 				ih_lst->ih_nsec += (uint64_t)
345 				    tick2ns(ticks, orig_cpuid);
346 				mutex_exit(&pxintr_ks_template_lock);
347 			}
348 
349 			/* enable interrupt on new targeted cpu */
350 			px_ib_intr_dist_en(dip, ino_p->ino_cpuid,
351 			    ino_p->ino_ino, B_TRUE);
352 		}
353 	}
354 	mutex_exit(&ib_p->ib_ino_lst_mutex);
355 }
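/*
 * Illustrative sketch (editorial addition, not part of the driver; kept out
 * of any build with #if 0): a toy run of the weighted-redistribution
 * protocol described above.  The framework calls the callback with
 * weight_max first, then with each decreasing weight, and finally with 0;
 * an ino is retargeted on the pass whose weight equals its own summed
 * weight, or on the first pass if its weight is >= weight_max.  The weights
 * below are made up for illustration only.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static void
toy_redist_pass(int32_t weight_max, int32_t weight,
    const int32_t *ino_weight, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		/* Same selection rule as px_ib_intr_redist() above */
		if ((weight == ino_weight[i]) ||
		    ((weight >= weight_max) && (ino_weight[i] >= weight_max)))
			(void) printf("pass %d: retarget ino #%d (weight %d)\n",
			    weight, i, ino_weight[i]);
	}
}

int
main(void)
{
	int32_t ino_weight[] = { 0, 2, 5, 9 };	/* made-up per-ino weights */
	int32_t weight_max = 5, w;

	for (w = weight_max; w >= 0; w--)
		toy_redist_pass(weight_max, w, ino_weight,
		    (int)(sizeof (ino_weight) / sizeof (ino_weight[0])));
	return (0);
}
#endif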
356 
357 /*
358  * Reset interrupts to IDLE.  This function is called during
359  * panic handling after redistributing interrupts; it's needed to
360  * support dumping to network devices after 'sync' from OBP.
361  *
362  * N.B.  This routine runs in a context where all other threads
363  * are permanently suspended.
364  */
365 static uint_t
366 px_ib_intr_reset(void *arg)
367 {
368 	px_ib_t		*ib_p = (px_ib_t *)arg;
369 
370 	DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n");
371 
372 	if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS)
373 		return (BF_FATAL);
374 
375 	return (BF_NONE);
376 }
377 
378 /*
379  * Locate the ino_info structure on ib_p->ib_ino_lst that matches ino_num;
380  * returns NULL if not found.
381  */
382 px_ib_ino_info_t *
383 px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num)
384 {
385 	px_ib_ino_info_t	*ino_p = ib_p->ib_ino_lst;
386 
387 	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
388 
389 	for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next);
390 
391 	return (ino_p);
392 }
393 
394 px_ib_ino_info_t *
395 px_ib_new_ino(px_ib_t *ib_p, devino_t ino_num, px_ih_t *ih_p)
396 {
397 	px_ib_ino_info_t	*ino_p = kmem_alloc(sizeof (px_ib_ino_info_t),
398 	    KM_SLEEP);
399 	sysino_t	sysino;
400 
401 	ino_p->ino_ino = ino_num;
402 	ino_p->ino_ib_p = ib_p;
403 	ino_p->ino_unclaimed = 0;
404 
405 	if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino_p->ino_ino,
406 	    &sysino) != DDI_SUCCESS) {
407 		/* Don't leak the ino_info just allocated above */
		kmem_free(ino_p, sizeof (px_ib_ino_info_t));
		return (NULL);
	}
408 
409 	ino_p->ino_sysino = sysino;
410 
411 	/*
412 	 * Cannot disable interrupt since we might share slot
413 	 */
414 	ih_p->ih_next = ih_p;
415 	ino_p->ino_ih_head = ih_p;
416 	ino_p->ino_ih_tail = ih_p;
417 	ino_p->ino_ih_start = ih_p;
418 	ino_p->ino_ih_size = 1;
419 
420 	ino_p->ino_next = ib_p->ib_ino_lst;
421 	ib_p->ib_ino_lst = ino_p;
422 
423 	return (ino_p);
424 }
425 
426 /*
427  * The ino_p was retrieved by a previous call to px_ib_locate_ino().
428  */
429 void
430 px_ib_delete_ino(px_ib_t *ib_p, px_ib_ino_info_t *ino_p)
431 {
432 	px_ib_ino_info_t	*list = ib_p->ib_ino_lst;
433 
434 	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
435 
436 	if (list == ino_p)
437 		ib_p->ib_ino_lst = list->ino_next;
438 	else {
439 		for (; list->ino_next != ino_p; list = list->ino_next);
440 		list->ino_next = ino_p->ino_next;
441 	}
442 }
443 
444 /*
445  * Free all ino structures when we are detaching.
446  */
447 void
448 px_ib_free_ino_all(px_ib_t *ib_p)
449 {
450 	px_ib_ino_info_t	*tmp = ib_p->ib_ino_lst;
451 	px_ib_ino_info_t	*next = NULL;
452 
453 	while (tmp) {
454 		next = tmp->ino_next;
455 		kmem_free(tmp, sizeof (px_ib_ino_info_t));
456 		tmp = next;
457 	}
458 }
459 
460 int
461 px_ib_ino_add_intr(px_t *px_p, px_ib_ino_info_t *ino_p, px_ih_t *ih_p)
462 {
463 	px_ib_t		*ib_p = ino_p->ino_ib_p;
464 	devino_t	ino = ino_p->ino_ino;
465 	sysino_t	sysino = ino_p->ino_sysino;
466 	dev_info_t	*dip = px_p->px_dip;
467 	cpuid_t		curr_cpu;
468 	hrtime_t	start_time;
469 	intr_state_t	intr_state;
470 	int		ret = DDI_SUCCESS;
471 
472 	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
473 	ASSERT(ib_p == px_p->px_ib_p);
474 
475 	DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino);
476 
477 	/* Get the current cpu target, then disable the interrupt */
478 	if ((ret = px_lib_intr_gettarget(dip, sysino,
479 	    &curr_cpu)) != DDI_SUCCESS) {
480 		DBG(DBG_IB, dip,
481 		    "px_ib_ino_add_intr px_intr_gettarget() failed\n");
482 
483 		return (ret);
484 	}
485 
486 	PX_INTR_DISABLE(dip, sysino);
487 
488 	/* Busy wait on pending interrupt */
489 	for (start_time = gethrtime(); !panicstr &&
490 	    ((ret = px_lib_intr_getstate(dip, sysino, &intr_state))
491 	    == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) {
492 		if (gethrtime() - start_time > px_intrpend_timeout) {
493 			cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending "
494 			    "sysino 0x%x(ino 0x%x) timeout",
495 			    ddi_driver_name(dip), ddi_get_instance(dip),
496 			    sysino, ino);
497 
498 			ret = DDI_FAILURE;
499 			break;
500 		}
501 	}
502 
503 	if (ret != DDI_SUCCESS) {
504 		DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, "
505 		    "ino 0x%x sysino 0x%x\n", ino, sysino);
506 
507 		return (ret);
508 	}
509 
510 	/* Link up px_ispec_t portion of the ppd */
511 	ih_p->ih_next = ino_p->ino_ih_head;
512 	ino_p->ino_ih_tail->ih_next = ih_p;
513 	ino_p->ino_ih_tail = ih_p;
514 
515 	ino_p->ino_ih_start = ino_p->ino_ih_head;
516 	ino_p->ino_ih_size++;
517 
518 	/*
519 	 * If the interrupt was previously blocked (left in pending state)
520 	 * because of jabber we need to clear the pending state in case the
521 	 * jabber has gone away.
522 	 */
523 	if (ino_p->ino_unclaimed > px_unclaimed_intr_max) {
524 		cmn_err(CE_WARN,
525 		    "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked",
526 		    ddi_driver_name(dip), ddi_get_instance(dip), ino);
527 
528 		ino_p->ino_unclaimed = 0;
529 		if ((ret = px_lib_intr_setstate(dip, sysino,
530 		    INTR_IDLE_STATE)) != DDI_SUCCESS) {
531 			DBG(DBG_IB, px_p->px_dip,
532 			    "px_ib_ino_add_intr px_intr_setstate failed\n");
533 
534 			return (ret);
535 		}
536 	}
537 
538 	/* Re-enable interrupt */
539 	PX_INTR_ENABLE(dip, sysino, curr_cpu);
540 
541 	return (ret);
542 }
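/*
 * Illustrative sketch (editorial addition, not part of the driver; kept out
 * of any build with #if 0): the handler list rooted at ino_ih_head is a
 * circular, singly linked list with an explicit tail, so the append above
 * is O(1) while dispatch code can keep walking ih_next indefinitely.  The
 * toy types below are hypothetical and only mirror the head/tail handling
 * in px_ib_new_ino() and px_ib_ino_add_intr().
 */
#if 0
typedef struct toy_ih {
	struct toy_ih	*ih_next;
} toy_ih_t;

typedef struct toy_ino {
	toy_ih_t	*ino_ih_head;
	toy_ih_t	*ino_ih_tail;
	int		ino_ih_size;
} toy_ino_t;

/* Mirrors px_ib_new_ino(): a one-element circular list points at itself */
static void
toy_ino_init(toy_ino_t *ino, toy_ih_t *ih)
{
	ih->ih_next = ih;
	ino->ino_ih_head = ino->ino_ih_tail = ih;
	ino->ino_ih_size = 1;
}

/* Mirrors px_ib_ino_add_intr(): append at the tail, keep the list circular */
static void
toy_ino_add(toy_ino_t *ino, toy_ih_t *ih)
{
	ih->ih_next = ino->ino_ih_head;
	ino->ino_ih_tail->ih_next = ih;
	ino->ino_ih_tail = ih;
	ino->ino_ih_size++;
}
#endif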
543 
544 /*
545  * Removes a px_ispec_t from the ino's linked list.
546  * Uses the hardware mutex to lock out interrupt threads.
547  * Side effect: the interrupt belonging to that ino is turned off on return.
548  * If we are sharing the PX slot with other inos, the caller needs
549  * to turn it back on.
550  */
551 int
552 px_ib_ino_rem_intr(px_t *px_p, px_ib_ino_info_t *ino_p, px_ih_t *ih_p)
553 {
554 	devino_t	ino = ino_p->ino_ino;
555 	sysino_t	sysino = ino_p->ino_sysino;
556 	dev_info_t	*dip = px_p->px_dip;
557 	px_ih_t		*ih_lst = ino_p->ino_ih_head;
558 	hrtime_t	start_time;
559 	intr_state_t	intr_state;
560 	int		i, ret = DDI_SUCCESS;
561 
562 	ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex));
563 
564 	DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n",
565 	    ino_p->ino_ino);
566 
567 	/* Disable the interrupt */
568 	PX_INTR_DISABLE(px_p->px_dip, sysino);
569 
570 	if (ino_p->ino_ih_size == 1) {
571 		if (ih_lst != ih_p)
572 			goto not_found;
573 
574 		/* No need to set head/tail as ino_p will be freed */
575 		goto reset;
576 	}
577 
578 	/* Busy wait on pending interrupt */
579 	for (start_time = gethrtime(); !panicstr &&
580 	    ((ret = px_lib_intr_getstate(dip, sysino, &intr_state))
581 	    == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) {
582 		if (gethrtime() - start_time > px_intrpend_timeout) {
583 			cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending "
584 			    "sysino 0x%x(ino 0x%x) timeout",
585 			    ddi_driver_name(dip), ddi_get_instance(dip),
586 			    sysino, ino);
587 
588 			ret = DDI_FAILURE;
589 			break;
590 		}
591 	}
592 
593 	if (ret != DDI_SUCCESS) {
594 		DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, "
595 		    "ino 0x%x sysino 0x%x\n", ino, sysino);
596 
597 		return (ret);
598 	}
599 
600 	/*
601 	 * If the interrupt was previously blocked (left in pending state)
602 	 * because of jabber we need to clear the pending state in case the
603 	 * jabber has gone away.
604 	 */
605 	if (ino_p->ino_unclaimed > px_unclaimed_intr_max) {
606 		cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: "
607 		    "ino 0x%x has been unblocked",
608 		    ddi_driver_name(dip), ddi_get_instance(dip), ino);
609 
610 		ino_p->ino_unclaimed = 0;
611 		if ((ret = px_lib_intr_setstate(dip, sysino,
612 		    INTR_IDLE_STATE)) != DDI_SUCCESS) {
613 			DBG(DBG_IB, px_p->px_dip,
614 			    "px_ib_ino_rem_intr px_intr_setstate failed\n");
615 
616 			return (ret);
617 		}
618 	}
619 
620 	/* Search the link list for ih_p */
621 	for (i = 0; (i < ino_p->ino_ih_size) &&
622 	    (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next);
623 
624 	if (ih_lst->ih_next != ih_p)
625 		goto not_found;
626 
627 	/* Remove ih_p from the link list and maintain the head/tail */
628 	ih_lst->ih_next = ih_p->ih_next;
629 
630 	if (ino_p->ino_ih_head == ih_p)
631 		ino_p->ino_ih_head = ih_p->ih_next;
632 	if (ino_p->ino_ih_tail == ih_p)
633 		ino_p->ino_ih_tail = ih_lst;
634 
635 	ino_p->ino_ih_start = ino_p->ino_ih_head;
636 
637 reset:
638 	if (ih_p->ih_config_handle)
639 		pci_config_teardown(&ih_p->ih_config_handle);
640 	if (ih_p->ih_ksp != NULL)
641 		kstat_delete(ih_p->ih_ksp);
642 
643 	kmem_free(ih_p, sizeof (px_ih_t));
644 	ino_p->ino_ih_size--;
645 
646 	return (ret);
647 
648 not_found:
649 	DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip,
650 	    "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);
651 
652 	return (DDI_FAILURE);
653 }
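/*
 * Illustrative sketch (editorial addition, not part of the driver; kept out
 * of any build with #if 0): removal from the circular handler list mirrors
 * the search above -- find the predecessor of the node to delete, splice it
 * out, then patch head/tail if either pointed at the removed node.  The toy
 * type is a hypothetical stand-in for px_ih_t; as in the driver, the caller
 * is expected to handle the one-element case separately.
 */
#if 0
typedef struct toy_ih {
	struct toy_ih	*ih_next;
} toy_ih_t;

/*
 * Remove 'ih' from a circular list of 'size' (>= 2) nodes.  Returns 0 on
 * success, -1 if 'ih' is not on the list (analogous to not_found: above).
 */
static int
toy_ino_remove(toy_ih_t **headp, toy_ih_t **tailp, int size, toy_ih_t *ih)
{
	toy_ih_t *prev = *headp;
	int i;

	/* Walk until prev->ih_next is the node to remove */
	for (i = 0; i < size && prev->ih_next != ih; i++)
		prev = prev->ih_next;
	if (prev->ih_next != ih)
		return (-1);

	prev->ih_next = ih->ih_next;		/* splice the node out */
	if (*headp == ih)
		*headp = ih->ih_next;
	if (*tailp == ih)
		*tailp = prev;
	return (0);
}
#endif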
654 
655 px_ih_t *
656 px_ib_ino_locate_intr(px_ib_ino_info_t *ino_p, dev_info_t *rdip,
657     uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code)
658 {
659 	px_ih_t	*ih_lst = ino_p->ino_ih_head;
660 	int	i;
661 
662 	for (i = 0; i < ino_p->ino_ih_size; i++, ih_lst = ih_lst->ih_next) {
663 		if ((ih_lst->ih_dip == rdip) && (ih_lst->ih_inum == inum) &&
664 		    (ih_lst->ih_rec_type == rec_type) &&
665 		    (ih_lst->ih_msg_code == msg_code))
666 			return (ih_lst);
667 	}
668 
669 	return ((px_ih_t *)NULL);
670 }
671 
672 px_ih_t *
673 px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum,
674     uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
675     caddr_t int_handler_arg1, caddr_t int_handler_arg2,
676     msiq_rec_type_t rec_type, msgcode_t msg_code)
677 {
678 	px_ih_t	*ih_p;
679 
680 	ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP);
681 	ih_p->ih_dip = rdip;
682 	ih_p->ih_inum = inum;
683 	ih_p->ih_intr_state = PX_INTR_STATE_DISABLE;
684 	ih_p->ih_handler = int_handler;
685 	ih_p->ih_handler_arg1 = int_handler_arg1;
686 	ih_p->ih_handler_arg2 = int_handler_arg2;
687 	ih_p->ih_config_handle = NULL;
688 	ih_p->ih_rec_type = rec_type;
689 	ih_p->ih_msg_code = msg_code;
690 	ih_p->ih_nsec = 0;
691 	ih_p->ih_ticks = 0;
692 	ih_p->ih_ksp = NULL;
693 
694 	return (ih_p);
695 }
696 
697 /*
698  * Only used for fixed or legacy interrupts.
699  */
700 int
701 px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip,
702     uint_t inum, devino_t ino, uint_t new_intr_state)
703 {
704 	px_ib_t		*ib_p = px_p->px_ib_p;
705 	px_ib_ino_info_t *ino_p;
706 	px_ih_t		*ih_p;
707 	int		ret = DDI_FAILURE;
708 
709 	DBG(DBG_IB, px_p->px_dip, "ib_update_intr_state: %s%d "
710 	    "inum %x devino %x state %x\n", ddi_driver_name(rdip),
711 	    ddi_get_instance(rdip), inum, ino, new_intr_state);
712 
713 	mutex_enter(&ib_p->ib_ino_lst_mutex);
714 
715 	if (ino_p = px_ib_locate_ino(ib_p, ino)) {
716 		if (ih_p = px_ib_ino_locate_intr(ino_p, rdip, inum, 0, 0)) {
717 			ih_p->ih_intr_state = new_intr_state;
718 			ret = DDI_SUCCESS;
719 		}
720 	}
721 
722 	mutex_exit(&ib_p->ib_ino_lst_mutex);
723 	return (ret);
724 }
725