xref: /titanic_52/usr/src/uts/sun4/io/px/px_ib.c (revision ace1a5f11236a072fca1b5e0ea1416a083a9f2aa)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * PX Interrupt Block implementation
31  */
32 
33 #include <sys/types.h>
34 #include <sys/kmem.h>
35 #include <sys/async.h>
36 #include <sys/systm.h>		/* panicstr */
37 #include <sys/spl.h>
38 #include <sys/sunddi.h>
39 #include <sys/machsystm.h>	/* intr_dist_add */
40 #include <sys/ddi_impldefs.h>
41 #include <sys/cpuvar.h>
42 #include "px_obj.h"
43 
44 /*LINTLIBRARY*/
45 
46 static void px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight);
47 static void px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p,
48     uint32_t cpu_id);
49 static uint_t px_ib_intr_reset(void *arg);
50 static void px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
51     char *path_name, int instance);
52 
53 int
54 px_ib_attach(px_t *px_p)
55 {
56 	dev_info_t	*dip = px_p->px_dip;
57 	px_ib_t		*ib_p;
58 	sysino_t	sysino;
59 	px_fault_t	*fault_p = &px_p->px_fault;
60 
61 	DBG(DBG_IB, dip, "px_ib_attach\n");
62 
63 	if (px_lib_intr_devino_to_sysino(px_p->px_dip,
64 	    px_p->px_inos[PX_INTR_PEC], &sysino) != DDI_SUCCESS)
65 		return (DDI_FAILURE);
66 
67 	/*
68 	 * Allocate interrupt block state structure and link it to
69 	 * the px state structure.
70 	 */
71 	ib_p = kmem_zalloc(sizeof (px_ib_t), KM_SLEEP);
72 	px_p->px_ib_p = ib_p;
73 	ib_p->ib_px_p = px_p;
74 	ib_p->ib_ino_lst = (px_ib_ino_info_t *)NULL;
75 
76 	mutex_init(&ib_p->ib_intr_lock, NULL, MUTEX_DRIVER, NULL);
77 	mutex_init(&ib_p->ib_ino_lst_mutex, NULL, MUTEX_DRIVER, NULL);
78 
79 	bus_func_register(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);
80 
81 	intr_dist_add_weighted(px_ib_intr_redist, ib_p);
82 
83 	/*
84 	 * Initialize PEC fault data structure
85 	 */
86 	fault_p->px_fh_dip = dip;
87 	fault_p->px_fh_sysino = sysino;
88 	fault_p->px_err_func = px_err_dmc_pec_intr;
89 	fault_p->px_intr_ino = px_p->px_inos[PX_INTR_PEC];
90 
91 	return (DDI_SUCCESS);
92 }
93 
94 void
95 px_ib_detach(px_t *px_p)
96 {
97 	px_ib_t		*ib_p = px_p->px_ib_p;
98 	dev_info_t	*dip = px_p->px_dip;
99 
100 	DBG(DBG_IB, dip, "px_ib_detach\n");
101 
102 	bus_func_unregister(BF_TYPE_RESINTR, px_ib_intr_reset, ib_p);
103 	intr_dist_rem_weighted(px_ib_intr_redist, ib_p);
104 
105 	mutex_destroy(&ib_p->ib_ino_lst_mutex);
106 	mutex_destroy(&ib_p->ib_intr_lock);
107 
108 	px_ib_free_ino_all(ib_p);
109 
110 	px_p->px_ib_p = NULL;
111 	kmem_free(ib_p, sizeof (px_ib_t));
112 }
113 
114 void
115 px_ib_intr_enable(px_t *px_p, cpuid_t cpu_id, devino_t ino)
116 {
117 	px_ib_t		*ib_p = px_p->px_ib_p;
118 	sysino_t	sysino;
119 
120 	/*
121 	 * Determine the cpu for the interrupt
122 	 */
123 	mutex_enter(&ib_p->ib_intr_lock);
124 
125 	DBG(DBG_IB, px_p->px_dip,
126 	    "px_ib_intr_enable: ino=%x cpu_id=%x\n", ino, cpu_id);
127 
128 	if (px_lib_intr_devino_to_sysino(px_p->px_dip, ino,
129 	    &sysino) != DDI_SUCCESS) {
130 		DBG(DBG_IB, px_p->px_dip,
131 		    "px_ib_intr_enable: px_intr_devino_to_sysino() failed\n");
132 
133 		mutex_exit(&ib_p->ib_intr_lock);
134 		return;
135 	}
136 
137 	PX_INTR_ENABLE(px_p->px_dip, sysino, cpu_id);
138 
139 	mutex_exit(&ib_p->ib_intr_lock);
140 }
141 
142 /*ARGSUSED*/
143 void
144 px_ib_intr_disable(px_ib_t *ib_p, devino_t ino, int wait)
145 {
146 	sysino_t	sysino;
147 
148 	mutex_enter(&ib_p->ib_intr_lock);
149 
150 	DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_disable: ino=%x\n", ino);
151 
152 	/* Disable the interrupt */
153 	if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino,
154 	    &sysino) != DDI_SUCCESS) {
155 		DBG(DBG_IB, ib_p->ib_px_p->px_dip,
156 		    "px_ib_intr_disable: px_intr_devino_to_sysino() failed\n");
157 
158 		mutex_exit(&ib_p->ib_intr_lock);
159 		return;
160 	}
161 
162 	PX_INTR_DISABLE(ib_p->ib_px_p->px_dip, sysino);
163 
164 	mutex_exit(&ib_p->ib_intr_lock);
165 }
166 
167 
/*
 * px_ib_intr_dist_en() - retarget the interrupt for 'ino' at 'cpu_id'.
 *
 * Disabled interrupts and retargets onto the CPU already owning the
 * interrupt are no-ops.  When wait_flag is set the interrupt is first
 * disabled and any in-flight delivery drained (busy-wait bounded by
 * px_intrpend_timeout) before being re-enabled on the new CPU;
 * otherwise the retarget happens via the enable alone.
 */
void
px_ib_intr_dist_en(dev_info_t *dip, cpuid_t cpu_id, devino_t ino,
    boolean_t wait_flag)
{
	uint32_t	old_cpu_id;
	sysino_t	sysino;
	intr_valid_state_t	enabled = 0;
	hrtime_t	start_time;
	intr_state_t	intr_state;
	int		e = DDI_SUCCESS;

	DBG(DBG_IB, dip, "px_ib_intr_dist_en: ino=0x%x\n", ino);

	if (px_lib_intr_devino_to_sysino(dip, ino, &sysino) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
		    "px_intr_devino_to_sysino() failed, ino 0x%x\n", ino);
		return;
	}

	/* Skip enabling disabled interrupts */
	if (px_lib_intr_getvalid(dip, sysino, &enabled) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: px_intr_getvalid() "
		    "failed, sysino 0x%x\n", sysino);
		return;
	}
	if (!enabled)
		return;

	/* Done if redistributed onto the same cpuid */
	if (px_lib_intr_gettarget(dip, sysino, &old_cpu_id) != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: "
		    "px_intr_gettarget() failed\n");
		return;
	}
	if (cpu_id == old_cpu_id)
		return;

	if (!wait_flag)
		goto done;

	/* Busy wait on pending interrupts */
	PX_INTR_DISABLE(dip, sysino);

	/*
	 * Spin until the interrupt leaves INTR_DELIVERED_STATE, the state
	 * read fails, a panic is in progress, or px_intrpend_timeout
	 * elapses (in which case the retarget proceeds regardless).
	 */
	for (start_time = gethrtime(); !panicstr &&
	    ((e = px_lib_intr_getstate(dip, sysino, &intr_state)) ==
		DDI_SUCCESS) &&
	    (intr_state == INTR_DELIVERED_STATE); /* */) {
		if (gethrtime() - start_time > px_intrpend_timeout) {
			cmn_err(CE_WARN,
			    "%s%d: px_ib_intr_dist_en: sysino 0x%x(ino 0x%x) "
			    "from cpu id 0x%x to 0x%x timeout",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    sysino, ino, old_cpu_id, cpu_id);

			e = DDI_FAILURE;
			break;
		}
	}

	if (e != DDI_SUCCESS)
		DBG(DBG_IB, dip, "px_ib_intr_dist_en: failed, "
		    "ino 0x%x sysino 0x%x\n", ino, sysino);

done:
	/* Re-enable/retarget on the new CPU even if the drain timed out. */
	PX_INTR_ENABLE(dip, sysino, cpu_id);
}
234 
/*
 * Fold the tick count accumulated in ih_p->ih_ticks (counted on
 * cpu_id's clock) into ih_p->ih_nsec, resetting ih_ticks to zero.
 * Called whenever an interrupt is moved off a CPU so the conversion
 * uses that CPU's clock rate.  Caller holds ib_ino_lst_mutex.
 */
static void
px_ib_cpu_ticks_to_ih_nsec(px_ib_t *ib_p, px_ih_t *ih_p, uint32_t cpu_id)
{
	extern kmutex_t pxintr_ks_template_lock;
	hrtime_t ticks;

	/*
	 * Because we are updating two fields in ih_t we must lock
	 * pxintr_ks_template_lock to prevent someone from reading the
	 * kstats after we set ih_ticks to 0 and before we increment
	 * ih_nsec to compensate.
	 *
	 * We must also protect against the interrupt arriving and incrementing
	 * ih_ticks between the time we read it and when we reset it to 0.
	 * To do this we use atomic_swap.
	 */

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));

	mutex_enter(&pxintr_ks_template_lock);
	ticks = atomic_swap_64(&ih_p->ih_ticks, 0);
	ih_p->ih_nsec += (uint64_t)tick2ns(ticks, cpu_id);
	mutex_exit(&pxintr_ks_template_lock);
}
259 
260 
261 /*
262  * Redistribute interrupts of the specified weight. The first call has a weight
263  * of weight_max, which can be used to trigger initialization for
264  * redistribution. The inos with weight [weight_max, inf.) should be processed
265  * on the "weight == weight_max" call.  This first call is followed by calls
266  * of decreasing weights, inos of that weight should be processed.  The final
267  * call specifies a weight of zero, this can be used to trigger processing of
268  * stragglers.
269  */
static void
px_ib_intr_redist(void *arg, int32_t weight_max, int32_t weight)
{
	px_ib_t		*ib_p = (px_ib_t *)arg;
	px_t		*px_p = ib_p->ib_px_p;
	dev_info_t	*dip = px_p->px_dip;
	px_ib_ino_info_t *ino_p;
	px_ih_t		*ih_lst;
	int32_t		dweight = 0;
	int		i;

	/* Redistribute internal interrupts */
	if (weight == 0) {
		devino_t	ino_pec = px_p->px_inos[PX_INTR_PEC];

		/* PEC interrupt: no wait for pending, no ino state to keep. */
		mutex_enter(&ib_p->ib_intr_lock);
		px_ib_intr_dist_en(dip, intr_dist_cpuid(), ino_pec, B_FALSE);
		mutex_exit(&ib_p->ib_intr_lock);
	}

	/* Redistribute device interrupts */
	mutex_enter(&ib_p->ib_ino_lst_mutex);

	for (ino_p = ib_p->ib_ino_lst; ino_p; ino_p = ino_p->ino_next) {
		uint32_t orig_cpuid;

		/*
		 * Recomputes the sum of interrupt weights of devices that
		 * share the same ino upon first call marked by
		 * (weight == weight_max).
		 */
		if (weight == weight_max) {
			ino_p->ino_intr_weight = 0;
			/* ino_ih_size handlers on a circular list */
			for (i = 0, ih_lst = ino_p->ino_ih_head;
			    i < ino_p->ino_ih_size;
			    i++, ih_lst = ih_lst->ih_next) {
				dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
				if (dweight > 0)
					ino_p->ino_intr_weight += dweight;
			}
		}

		/*
		 * As part of redistributing weighted interrupts over cpus,
		 * nexus redistributes device interrupts and updates
		 * cpu weight. The purpose is for the most light weighted
		 * cpu to take the next interrupt and gain weight, therefore
		 * attention demanding device gains more cpu attention by
		 * making itself heavy.
		 *
		 * An ino is processed on the pass whose weight matches its
		 * total weight; inos at or above weight_max are processed
		 * on the first (weight == weight_max) pass.
		 */
		if ((weight == ino_p->ino_intr_weight) ||
		    ((weight >= weight_max) &&
		    (ino_p->ino_intr_weight >= weight_max))) {
			orig_cpuid = ino_p->ino_cpuid;
			/* Prior CPU may be gone; use the current CPU then. */
			if (cpu[orig_cpuid] == NULL)
				orig_cpuid = CPU->cpu_id;

			/* select cpuid to target and mark ino established */
			ino_p->ino_cpuid = intr_dist_cpuid();

			/* Add device weight to targeted cpu. */
			for (i = 0, ih_lst = ino_p->ino_ih_head;
			    i < ino_p->ino_ih_size;
			    i++, ih_lst = ih_lst->ih_next) {

				dweight = i_ddi_get_intr_weight(ih_lst->ih_dip);
				intr_dist_cpuid_add_device_weight(
				    ino_p->ino_cpuid, ih_lst->ih_dip, dweight);

				/*
				 * Different cpus may have different clock
				 * speeds. to account for this, whenever an
				 * interrupt is moved to a new CPU, we
				 * convert the accumulated ticks into nsec,
				 * based upon the clock rate of the prior
				 * CPU.
				 *
				 * It is possible that the prior CPU no longer
				 * exists. In this case, fall back to using
				 * this CPU's clock rate.
				 *
				 * Note that the value in ih_ticks has already
				 * been corrected for any power savings mode
				 * which might have been in effect.
				 */
				px_ib_cpu_ticks_to_ih_nsec(ib_p, ih_lst,
				    orig_cpuid);
			}

			/* enable interrupt on new targeted cpu */
			px_ib_intr_dist_en(dip, ino_p->ino_cpuid,
			    ino_p->ino_ino, B_TRUE);
		}
	}
	mutex_exit(&ib_p->ib_ino_lst_mutex);
}
366 
367 /*
368  * Reset interrupts to IDLE.  This function is called during
369  * panic handling after redistributing interrupts; it's needed to
370  * support dumping to network devices after 'sync' from OBP.
371  *
372  * N.B.  This routine runs in a context where all other threads
373  * are permanently suspended.
374  */
375 static uint_t
376 px_ib_intr_reset(void *arg)
377 {
378 	px_ib_t		*ib_p = (px_ib_t *)arg;
379 
380 	DBG(DBG_IB, ib_p->ib_px_p->px_dip, "px_ib_intr_reset\n");
381 
382 	if (px_lib_intr_reset(ib_p->ib_px_p->px_dip) != DDI_SUCCESS)
383 		return (BF_FATAL);
384 
385 	return (BF_NONE);
386 }
387 
388 /*
389  * Locate ino_info structure on ib_p->ib_ino_lst according to ino#
390  * returns NULL if not found.
391  */
392 px_ib_ino_info_t *
393 px_ib_locate_ino(px_ib_t *ib_p, devino_t ino_num)
394 {
395 	px_ib_ino_info_t	*ino_p = ib_p->ib_ino_lst;
396 
397 	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
398 
399 	for (; ino_p && ino_p->ino_ino != ino_num; ino_p = ino_p->ino_next);
400 
401 	return (ino_p);
402 }
403 
404 px_ib_ino_info_t *
405 px_ib_new_ino(px_ib_t *ib_p, devino_t ino_num, px_ih_t *ih_p)
406 {
407 	px_ib_ino_info_t	*ino_p = kmem_alloc(sizeof (px_ib_ino_info_t),
408 	    KM_SLEEP);
409 	sysino_t	sysino;
410 
411 	ino_p->ino_ino = ino_num;
412 	ino_p->ino_ib_p = ib_p;
413 	ino_p->ino_unclaimed = 0;
414 
415 	if (px_lib_intr_devino_to_sysino(ib_p->ib_px_p->px_dip, ino_p->ino_ino,
416 	    &sysino) != DDI_SUCCESS)
417 		return (NULL);
418 
419 	ino_p->ino_sysino = sysino;
420 
421 	/*
422 	 * Cannot disable interrupt since we might share slot
423 	 */
424 	ih_p->ih_next = ih_p;
425 	ino_p->ino_ih_head = ih_p;
426 	ino_p->ino_ih_tail = ih_p;
427 	ino_p->ino_ih_start = ih_p;
428 	ino_p->ino_ih_size = 1;
429 
430 	ino_p->ino_next = ib_p->ib_ino_lst;
431 	ib_p->ib_ino_lst = ino_p;
432 
433 	return (ino_p);
434 }
435 
436 /*
437  * The ino_p is retrieved by previous call to px_ib_locate_ino().
438  */
439 void
440 px_ib_delete_ino(px_ib_t *ib_p, px_ib_ino_info_t *ino_p)
441 {
442 	px_ib_ino_info_t	*list = ib_p->ib_ino_lst;
443 
444 	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
445 
446 	if (list == ino_p)
447 		ib_p->ib_ino_lst = list->ino_next;
448 	else {
449 		for (; list->ino_next != ino_p; list = list->ino_next);
450 		list->ino_next = ino_p->ino_next;
451 	}
452 }
453 
454 /*
455  * Free all ino when we are detaching.
456  */
457 void
458 px_ib_free_ino_all(px_ib_t *ib_p)
459 {
460 	px_ib_ino_info_t	*tmp = ib_p->ib_ino_lst;
461 	px_ib_ino_info_t	*next = NULL;
462 
463 	while (tmp) {
464 		next = tmp->ino_next;
465 		kmem_free(tmp, sizeof (px_ib_ino_info_t));
466 		tmp = next;
467 	}
468 }
469 
/*
 * Add handler ih_p to ino_p's circular handler list.
 *
 * The interrupt is disabled and any in-flight delivery is drained
 * (busy-wait bounded by px_intrpend_timeout) before the list is
 * modified, then re-enabled on its original target CPU.  If the ino
 * had been blocked for jabbering, its pending state is cleared so a
 * recovered device can interrupt again.  Caller holds
 * ib_ino_lst_mutex.
 */
int
px_ib_ino_add_intr(px_t *px_p, px_ib_ino_info_t *ino_p, px_ih_t *ih_p)
{
	px_ib_t		*ib_p = ino_p->ino_ib_p;
	devino_t	ino = ino_p->ino_ino;
	sysino_t	sysino = ino_p->ino_sysino;
	dev_info_t	*dip = px_p->px_dip;
	cpuid_t		curr_cpu;
	hrtime_t	start_time;
	intr_state_t	intr_state;
	int		ret = DDI_SUCCESS;

	ASSERT(MUTEX_HELD(&ib_p->ib_ino_lst_mutex));
	ASSERT(ib_p == px_p->px_ib_p);

	DBG(DBG_IB, dip, "px_ib_ino_add_intr ino=%x\n", ino_p->ino_ino);

	/* Disable the interrupt */
	if ((ret = px_lib_intr_gettarget(dip, sysino,
	    &curr_cpu)) != DDI_SUCCESS) {
		DBG(DBG_IB, dip,
		    "px_ib_ino_add_intr px_intr_gettarget() failed\n");

		return (ret);
	}

	PX_INTR_DISABLE(dip, sysino);

	/* Busy wait on pending interrupt */
	for (start_time = gethrtime(); !panicstr &&
	    ((ret = px_lib_intr_getstate(dip, sysino, &intr_state))
	    == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) {
		if (gethrtime() - start_time > px_intrpend_timeout) {
			cmn_err(CE_WARN, "%s%d: px_ib_ino_add_intr: pending "
			    "sysino 0x%x(ino 0x%x) timeout",
			    ddi_driver_name(dip), ddi_get_instance(dip),
			    sysino, ino);

			ret = DDI_FAILURE;
			break;
		}
	}

	/* NOTE(review): the interrupt is left disabled on this error path. */
	if (ret != DDI_SUCCESS) {
		DBG(DBG_IB, dip, "px_ib_ino_add_intr: failed, "
		    "ino 0x%x sysino 0x%x\n", ino, sysino);

		return (ret);
	}

	/* Link up px_ispec_t portion of the ppd */
	ih_p->ih_next = ino_p->ino_ih_head;
	ino_p->ino_ih_tail->ih_next = ih_p;
	ino_p->ino_ih_tail = ih_p;

	ino_p->ino_ih_start = ino_p->ino_ih_head;
	ino_p->ino_ih_size++;

	/*
	 * If the interrupt was previously blocked (left in pending state)
	 * because of jabber we need to clear the pending state in case the
	 * jabber has gone away.
	 */
	if (ino_p->ino_unclaimed > px_unclaimed_intr_max) {
		cmn_err(CE_WARN,
		    "%s%d: px_ib_ino_add_intr: ino 0x%x has been unblocked",
		    ddi_driver_name(dip), ddi_get_instance(dip), ino);

		ino_p->ino_unclaimed = 0;
		if ((ret = px_lib_intr_setstate(dip, sysino,
		    INTR_IDLE_STATE)) != DDI_SUCCESS) {
			DBG(DBG_IB, px_p->px_dip,
			    "px_ib_ino_add_intr px_intr_setstate failed\n");

			return (ret);
		}
	}

	/* Re-enable interrupt */
	PX_INTR_ENABLE(dip, sysino, curr_cpu);

	return (ret);
}
553 
554 /*
555  * Removes px_ispec_t from the ino's link list.
556  * uses hardware mutex to lock out interrupt threads.
557  * Side effects: interrupt belongs to that ino is turned off on return.
558  * if we are sharing PX slot with other inos, the caller needs
559  * to turn it back on.
560  */
561 int
562 px_ib_ino_rem_intr(px_t *px_p, px_ib_ino_info_t *ino_p, px_ih_t *ih_p)
563 {
564 	devino_t	ino = ino_p->ino_ino;
565 	sysino_t	sysino = ino_p->ino_sysino;
566 	dev_info_t	*dip = px_p->px_dip;
567 	px_ih_t		*ih_lst = ino_p->ino_ih_head;
568 	hrtime_t	start_time;
569 	intr_state_t	intr_state;
570 	int		i, ret = DDI_SUCCESS;
571 
572 	ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex));
573 
574 	DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n",
575 	    ino_p->ino_ino);
576 
577 	/* Disable the interrupt */
578 	PX_INTR_DISABLE(px_p->px_dip, sysino);
579 
580 	if (ino_p->ino_ih_size == 1) {
581 		if (ih_lst != ih_p)
582 			goto not_found;
583 
584 		/* No need to set head/tail as ino_p will be freed */
585 		goto reset;
586 	}
587 
588 	/* Busy wait on pending interrupt */
589 	for (start_time = gethrtime(); !panicstr &&
590 	    ((ret = px_lib_intr_getstate(dip, sysino, &intr_state))
591 	    == DDI_SUCCESS) && (intr_state == INTR_DELIVERED_STATE); /* */) {
592 		if (gethrtime() - start_time > px_intrpend_timeout) {
593 			cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending "
594 			    "sysino 0x%x(ino 0x%x) timeout",
595 			    ddi_driver_name(dip), ddi_get_instance(dip),
596 			    sysino, ino);
597 
598 			ret = DDI_FAILURE;
599 			break;
600 		}
601 	}
602 
603 	if (ret != DDI_SUCCESS) {
604 		DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, "
605 		    "ino 0x%x sysino 0x%x\n", ino, sysino);
606 
607 		return (ret);
608 	}
609 
610 	/*
611 	 * If the interrupt was previously blocked (left in pending state)
612 	 * because of jabber we need to clear the pending state in case the
613 	 * jabber has gone away.
614 	 */
615 	if (ino_p->ino_unclaimed > px_unclaimed_intr_max) {
616 		cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: "
617 		    "ino 0x%x has been unblocked",
618 		    ddi_driver_name(dip), ddi_get_instance(dip), ino);
619 
620 		ino_p->ino_unclaimed = 0;
621 		if ((ret = px_lib_intr_setstate(dip, sysino,
622 		    INTR_IDLE_STATE)) != DDI_SUCCESS) {
623 			DBG(DBG_IB, px_p->px_dip,
624 			    "px_ib_ino_rem_intr px_intr_setstate failed\n");
625 
626 			return (ret);
627 		}
628 	}
629 
630 	/* Search the link list for ih_p */
631 	for (i = 0; (i < ino_p->ino_ih_size) &&
632 	    (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next);
633 
634 	if (ih_lst->ih_next != ih_p)
635 		goto not_found;
636 
637 	/* Remove ih_p from the link list and maintain the head/tail */
638 	ih_lst->ih_next = ih_p->ih_next;
639 
640 	if (ino_p->ino_ih_head == ih_p)
641 		ino_p->ino_ih_head = ih_p->ih_next;
642 	if (ino_p->ino_ih_tail == ih_p)
643 		ino_p->ino_ih_tail = ih_lst;
644 
645 	ino_p->ino_ih_start = ino_p->ino_ih_head;
646 
647 reset:
648 	if (ih_p->ih_config_handle)
649 		pci_config_teardown(&ih_p->ih_config_handle);
650 	if (ih_p->ih_ksp != NULL)
651 		kstat_delete(ih_p->ih_ksp);
652 
653 	kmem_free(ih_p, sizeof (px_ih_t));
654 	ino_p->ino_ih_size--;
655 
656 	return (ret);
657 
658 not_found:
659 	DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip,
660 		"ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);
661 
662 	return (DDI_FAILURE);
663 }
664 
665 px_ih_t *
666 px_ib_ino_locate_intr(px_ib_ino_info_t *ino_p, dev_info_t *rdip,
667     uint32_t inum, msiq_rec_type_t rec_type, msgcode_t msg_code)
668 {
669 	px_ih_t	*ih_lst = ino_p->ino_ih_head;
670 	int	i;
671 
672 	for (i = 0; i < ino_p->ino_ih_size; i++, ih_lst = ih_lst->ih_next) {
673 		if ((ih_lst->ih_dip == rdip) && (ih_lst->ih_inum == inum) &&
674 		    (ih_lst->ih_rec_type == rec_type) &&
675 		    (ih_lst->ih_msg_code == msg_code))
676 			return (ih_lst);
677 	}
678 
679 	return ((px_ih_t *)NULL);
680 }
681 
682 px_ih_t *
683 px_ib_alloc_ih(dev_info_t *rdip, uint32_t inum,
684     uint_t (*int_handler)(caddr_t int_handler_arg1, caddr_t int_handler_arg2),
685     caddr_t int_handler_arg1, caddr_t int_handler_arg2,
686     msiq_rec_type_t rec_type, msgcode_t msg_code)
687 {
688 	px_ih_t	*ih_p;
689 
690 	ih_p = kmem_alloc(sizeof (px_ih_t), KM_SLEEP);
691 	ih_p->ih_dip = rdip;
692 	ih_p->ih_inum = inum;
693 	ih_p->ih_intr_state = PX_INTR_STATE_DISABLE;
694 	ih_p->ih_handler = int_handler;
695 	ih_p->ih_handler_arg1 = int_handler_arg1;
696 	ih_p->ih_handler_arg2 = int_handler_arg2;
697 	ih_p->ih_config_handle = NULL;
698 	ih_p->ih_rec_type = rec_type;
699 	ih_p->ih_msg_code = msg_code;
700 	ih_p->ih_nsec = 0;
701 	ih_p->ih_ticks = 0;
702 	ih_p->ih_ksp = NULL;
703 
704 	return (ih_p);
705 }
706 
707 /*
708  * Only used for fixed or legacy interrupts.
709  */
710 int
711 px_ib_update_intr_state(px_t *px_p, dev_info_t *rdip,
712     uint_t inum, devino_t ino, uint_t new_intr_state)
713 {
714 	px_ib_t		*ib_p = px_p->px_ib_p;
715 	px_ib_ino_info_t *ino_p;
716 	px_ih_t		*ih_p;
717 	int		ret = DDI_FAILURE;
718 
719 	DBG(DBG_IB, px_p->px_dip, "ib_update_intr_state: %s%d "
720 	    "inum %x devino %x state %x\n", ddi_driver_name(rdip),
721 	    ddi_get_instance(rdip), inum, ino, new_intr_state);
722 
723 	mutex_enter(&ib_p->ib_ino_lst_mutex);
724 
725 	if (ino_p = px_ib_locate_ino(ib_p, ino)) {
726 		if (ih_p = px_ib_ino_locate_intr(ino_p, rdip, inum, 0, 0)) {
727 			ih_p->ih_intr_state = new_intr_state;
728 			ret = DDI_SUCCESS;
729 		}
730 	}
731 
732 	mutex_exit(&ib_p->ib_ino_lst_mutex);
733 	return (ret);
734 }
735 
736 
737 static void
738 px_fill_in_intr_devs(pcitool_intr_dev_t *dev, char *driver_name,
739     char *path_name, int instance)
740 {
741 	(void) strncpy(dev->driver_name, driver_name, MAXMODCONFNAME-1);
742 	dev->driver_name[MAXMODCONFNAME] = '\0';
743 	(void) strncpy(dev->path, path_name, MAXPATHLEN-1);
744 	dev->dev_inst = instance;
745 }
746 
747 
748 /*
749  * Return the dips or number of dips associated with a given interrupt block.
750  * Size of dips array arg is passed in as dips_ret arg.
751  * Number of dips returned is returned in dips_ret arg.
752  * Array of dips gets returned in the dips argument.
753  * Function returns number of dips existing for the given interrupt block.
754  *
755  * Note: this function assumes an enabled/valid INO, which is why it returns
756  * the px node and (Internal) when it finds no other devices (and *devs_ret > 0)
757  */
758 uint8_t
759 pxtool_ib_get_ino_devs(
760     px_t *px_p, uint32_t ino, uint8_t *devs_ret, pcitool_intr_dev_t *devs)
761 {
762 	px_ib_t *ib_p = px_p->px_ib_p;
763 	px_ib_ino_info_t *ino_p;
764 	px_ih_t *ih_p;
765 	uint32_t num_devs = 0;
766 	char pathname[MAXPATHLEN];
767 	int i;
768 
769 	mutex_enter(&ib_p->ib_ino_lst_mutex);
770 	ino_p = px_ib_locate_ino(ib_p, ino);
771 	if (ino_p != NULL) {
772 		num_devs = ino_p->ino_ih_size;
773 		for (i = 0, ih_p = ino_p->ino_ih_head;
774 		    ((i < ino_p->ino_ih_size) && (i < *devs_ret));
775 		    i++, ih_p = ih_p->ih_next) {
776 			(void) ddi_pathname(ih_p->ih_dip, pathname);
777 			px_fill_in_intr_devs(&devs[i],
778 			    (char *)ddi_driver_name(ih_p->ih_dip),  pathname,
779 			    ddi_get_instance(ih_p->ih_dip));
780 		}
781 		*devs_ret = i;
782 
783 	} else if (*devs_ret > 0) {
784 		(void) ddi_pathname(px_p->px_dip, pathname);
785 		strcat(pathname, " (Internal)");
786 		px_fill_in_intr_devs(&devs[0],
787 		    (char *)ddi_driver_name(px_p->px_dip),  pathname,
788 		    ddi_get_instance(px_p->px_dip));
789 		num_devs = *devs_ret = 1;
790 	}
791 
792 	mutex_exit(&ib_p->ib_ino_lst_mutex);
793 
794 	return (num_devs);
795 }
796 
797 
798 void px_ib_log_new_cpu(px_ib_t *ib_p, uint32_t old_cpu_id, uint32_t new_cpu_id,
799     uint32_t ino)
800 {
801 	px_ib_ino_info_t *ino_p;
802 
803 	mutex_enter(&ib_p->ib_ino_lst_mutex);
804 
805 	/* Log in OS data structures the new CPU. */
806 	ino_p = px_ib_locate_ino(ib_p, ino);
807 	if (ino_p != NULL) {
808 
809 		/* Log in OS data structures the new CPU. */
810 		ino_p->ino_cpuid = new_cpu_id;
811 
812 		/* Account for any residual time to be logged for old cpu. */
813 		px_ib_cpu_ticks_to_ih_nsec(ib_p, ino_p->ino_ih_head,
814 		    old_cpu_id);
815 	}
816 
817 	mutex_exit(&ib_p->ib_ino_lst_mutex);
818 }
819