1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 1999-2002 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * hci1394_ixl_isr.c
31  *    Isochronous IXL Interrupt Service Routines.
32  *    The interrupt handler determines which OpenHCI DMA descriptors
33  *    have been executed by the hardware, tracks the path in the
34  *    corresponding IXL program, issues callbacks as needed, and resets
35  *    the OpenHCI DMA descriptors.
36  */
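/*
 * Rough call flow for this file (an informal summary of the routines below,
 * not authoritative documentation):
 *
 *    hci1394_ixl_interrupt()                   front-end; flag handshake
 *      -> hci1394_ixl_dma_sync()               walks the IXL program
 *           -> hci1394_ixl_intr_check_xfer()   scans one xfer cmd's blocks
 *                -> hci1394_ixl_intr_check_done()  stop/done/skip/lost
 *
 *    hci1394_isoch_cycle_inconsistent() and hci1394_isoch_cycle_lost()
 *    re-drive hci1394_ixl_interrupt() for each running transmit context.
 */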
37 
38 #include <sys/types.h>
39 #include <sys/conf.h>
40 
41 #include <sys/tnf_probe.h>
42 
43 #include <sys/1394/h1394.h>
44 #include <sys/1394/ixl1394.h>
45 #include <sys/1394/adapters/hci1394.h>
46 
47 
48 /* Return values for local hci1394_ixl_intr_check_done() */
49 #define	IXL_CHECK_LOST	(-1)	/* ixl cmd intr processing lost */
50 #define	IXL_CHECK_DONE	0	/* ixl cmd intr processing done */
51 #define	IXL_CHECK_SKIP	1	/* ixl cmd intr processing context skipped */
52 #define	IXL_CHECK_STOP	2	/* ixl cmd intr processing context stopped */
53 
54 static boolean_t hci1394_ixl_intr_check_xfer(hci1394_state_t *soft_statep,
55     hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlp,
56     ixl1394_command_t **ixlnextpp, uint16_t *timestampp, int *donecodep);
57 static int hci1394_ixl_intr_check_done(hci1394_state_t *soft_statep,
58     hci1394_iso_ctxt_t *ctxtp);
59 
60 /*
61  * hci1394_ixl_interrupt
62  *    main entry point (front-end) into interrupt processing.
 63  *    acquires the mutex, checks whether an update is in progress, sets
 64  *    flags accordingly, and calls hci1394_ixl_dma_sync() to do the work.
65  */
66 void
67 hci1394_ixl_interrupt(hci1394_state_t *soft_statep,
68     hci1394_iso_ctxt_t *ctxtp, boolean_t in_stop)
69 {
70 	uint_t	status;
71 	int	retcode;
72 
73 	TNF_PROBE_0_DEBUG(hci1394_ixl_interrupt_enter,
74 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
75 
76 	status = 1;
77 
78 	/* acquire the interrupt processing context mutex */
79 	mutex_enter(&ctxtp->intrprocmutex);
80 
81 	/* set flag to indicate that interrupt processing is required */
82 	ctxtp->intr_flags |= HCI1394_ISO_CTXT_INTRSET;
83 
84 	/* if update proc already in progress, let it handle intr processing */
85 	if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
86 		retcode = HCI1394_IXL_INTR_INUPDATE;
87 		status = 0;
88 		TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
89 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
90 		    "HCI1394_IXL_INTR_INUPDATE");
91 
92 	} else if (ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
 93 		/* else fatal error if intr processing already in progress */
94 		retcode = HCI1394_IXL_INTR_ININTR;
95 		status = 0;
96 		TNF_PROBE_1(hci1394_ixl_interrupt_error,
97 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
98 		    "HCI1394_IXL_INTR_ININTR");
99 
100 	} else if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
101 		/* else fatal error if callback in progress flag is set */
102 		retcode = HCI1394_IXL_INTR_INCALL;
103 		status = 0;
104 		TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
105 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
106 		    "HCI1394_IXL_INTR_INCALL");
107 	} else if (!in_stop && (ctxtp->intr_flags & HCI1394_ISO_CTXT_STOP)) {
108 		/* context is being stopped */
109 		retcode = HCI1394_IXL_INTR_STOP;
110 		status = 0;
111 		TNF_PROBE_1_DEBUG(hci1394_ixl_interrupt_error,
112 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
113 		    "HCI1394_IXL_INTR_STOP");
114 	}
115 
116 	/*
117 	 * if context is available, reserve it, do interrupt processing
118 	 * and free it
119 	 */
120 	if (status) {
121 		ctxtp->intr_flags |= HCI1394_ISO_CTXT_ININTR;
122 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
123 		mutex_exit(&ctxtp->intrprocmutex);
124 
125 		retcode = hci1394_ixl_dma_sync(soft_statep, ctxtp);
126 
127 		mutex_enter(&ctxtp->intrprocmutex);
128 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_ININTR;
129 
130 		/* notify stop thread that the interrupt is finished */
131 		if ((ctxtp->intr_flags & HCI1394_ISO_CTXT_STOP) && !in_stop) {
132 			cv_signal(&ctxtp->intr_cv);
133 		}
134 	}
135 
136 	/* free the intr processing context mutex before error checks */
137 	mutex_exit(&ctxtp->intrprocmutex);
138 
139 	/* if context stopped, invoke callback */
140 	if (retcode == HCI1394_IXL_INTR_DMASTOP) {
141 		hci1394_do_stop(soft_statep, ctxtp, B_TRUE, ID1394_DONE);
142 	}
143 	/* if error, stop and invoke callback */
144 	if (retcode == HCI1394_IXL_INTR_DMALOST) {
145 		hci1394_do_stop(soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
146 	}
147 
148 	TNF_PROBE_0_DEBUG(hci1394_ixl_interrupt_exit,
149 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
150 }
151 
152 /*
153  * hci1394_ixl_dma_sync()
154  *    the heart of interrupt processing, this routine correlates where the
155  *    hardware is for the specified context with the IXL program.  Invokes
156  *    callbacks as needed.  Also called by "update" to make sure ixl is
157  *    sync'ed up with where the hardware is.
158  *    Returns one of the ixl_intr defined return codes - HCI1394_IXL_INTR...
159  *    {..._DMALOST, ..._DMASTOP, ..._NOADV,... _NOERROR}
160  */
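/*
 * A sketch of the walk performed below (informal, the code is the
 * authority): start at ctxtp->ixl_execp, and for each IXL command either
 * hand xfer-start commands to hci1394_ixl_intr_check_xfer() (which may end
 * the walk with a done code), follow JUMP labels, record the last completed
 * timestamp into STORE_TIMESTAMP commands, or invoke CALLBACK functions
 * with the HCI1394_ISO_CTXT_INCALL flag set around the call.
 */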
161 int
162 hci1394_ixl_dma_sync(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
163 {
164 	ixl1394_command_t *ixlp = NULL;	/* current ixl command */
165 	ixl1394_command_t *ixlnextp;	/* next ixl command */
166 	uint16_t	ixlopcode;
167 	uint16_t	timestamp;
168 	int		donecode;
169 	boolean_t	isdone;
170 
171 	void (*callback)(opaque_t, struct ixl1394_callback *);
172 
173 	TNF_PROBE_0_DEBUG(hci1394_ixl_dma_sync_enter,
174 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
175 
176 	ASSERT(MUTEX_NOT_HELD(&ctxtp->intrprocmutex));
177 
178 	/* xfer start ixl cmd where last left off */
179 	ixlnextp = ctxtp->ixl_execp;
180 
181 	/* last completed descriptor block's timestamp  */
182 	timestamp = ctxtp->dma_last_time;
183 
184 	/*
185 	 * follow the execution path in the IXL program until finding a dma
186 	 * descriptor whose status isn't set or until running out of IXL cmds
187 	 */
188 	while (ixlnextp != NULL) {
189 		ixlp = ixlnextp;
190 		ixlnextp = ixlp->next_ixlp;
191 		ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;
192 
193 		/*
194 		 * process these IXL commands: xfer start, callback, store
195 		 * timestamp and jump; ignore all others
196 		 */
197 
198 		/* determine if this is an xfer start IXL command */
199 		if (((ixlopcode & IXL1394_OPF_ISXFER) != 0) &&
200 		    ((ixlopcode & IXL1394_OPTY_MASK) != 0)) {
201 
202 			/* process xfer cmd to see if HW has been here */
203 			isdone = hci1394_ixl_intr_check_xfer(soft_statep, ctxtp,
204 			    ixlp, &ixlnextp, &timestamp, &donecode);
205 
206 			if (isdone == B_TRUE) {
207 				TNF_PROBE_0_DEBUG(hci1394_ixl_dma_sync_exit,
208 					HCI1394_TNF_HAL_STACK_ISOCH, "");
209 				return (donecode);
210 			}
211 
212 			/* continue to process next IXL command */
213 			continue;
214 		}
215 
216 		/* else check if IXL cmd - jump, callback or store timestamp */
217 		switch (ixlopcode) {
218 		case IXL1394_OP_JUMP:
219 			/*
220 			 * set next IXL cmd to label ptr in current IXL jump cmd
221 			 */
222 			ixlnextp = ((ixl1394_jump_t *)ixlp)->label;
223 			break;
224 
225 		case IXL1394_OP_STORE_TIMESTAMP:
226 			/*
227 			 * set last timestamp value recorded into current IXL
228 			 * cmd
229 			 */
230 			((ixl1394_store_timestamp_t *)ixlp)->timestamp =
231 			    timestamp;
232 			break;
233 
234 		case IXL1394_OP_CALLBACK:
235 			/*
236 			 * if callback function is specified, call it with IXL
237 			 * cmd addr.  Make sure to grab the lock before setting
238 			 * the "in callback" flag in intr_flags.
239 			 */
240 			mutex_enter(&ctxtp->intrprocmutex);
241 			ctxtp->intr_flags |= HCI1394_ISO_CTXT_INCALL;
242 			mutex_exit(&ctxtp->intrprocmutex);
243 
244 			callback = ((ixl1394_callback_t *)ixlp)->callback;
245 			if (callback != NULL) {
246 				callback(ctxtp->global_callback_arg,
247 				    (ixl1394_callback_t *)ixlp);
248 			}
249 
250 			/*
251 			 * And grab the lock again before clearing
252 			 * the "in callback" flag.
253 			 */
254 			mutex_enter(&ctxtp->intrprocmutex);
255 			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INCALL;
256 			mutex_exit(&ctxtp->intrprocmutex);
257 			break;
258 		}
259 	}
260 
261 	/*
262 	 * If we jumped to NULL because of an updateable JUMP, set ixl_execp
263 	 * back to ixlp.  The destination label might get updated to a
264 	 * non-NULL value.
265 	 */
266 	if ((ixlp != NULL) && (ixlp->ixl_opcode == IXL1394_OP_JUMP_U)) {
267 		ctxtp->ixl_execp = ixlp;
268 		TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
269 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
270 		    "INTR_NOERROR");
271 		return (HCI1394_IXL_INTR_NOERROR);
272 	}
273 
274 	/* save null IXL cmd and depth and last timestamp */
275 	ctxtp->ixl_execp = NULL;
276 	ctxtp->ixl_exec_depth = 0;
277 	ctxtp->dma_last_time = timestamp;
278 
279 	ctxtp->rem_noadv_intrs = 0;
280 
281 
282 	/* return stopped status if at end of IXL cmds & context stopped */
283 	if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
284 		TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
285 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
286 		    "INTR_DMASTOP");
287 		return (HCI1394_IXL_INTR_DMASTOP);
288 	}
289 
290 	/* else interrupt processing is lost */
291 	TNF_PROBE_1_DEBUG(hci1394_ixl_dma_sync_exit,
292 	    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg, "INTR_DMALOST");
293 	return (HCI1394_IXL_INTR_DMALOST);
294 }
295 
296 /*
297  * hci1394_ixl_intr_check_xfer()
298  *    Process the given IXL xfer cmd, checking each dma descriptor block's
299  *    status until one is found whose status isn't set, until full depth is
300  *    reached at the current IXL command, or until a hardware skip is detected.
301  *
302  *    Returns B_TRUE if processing should terminate (either have stopped
303  *    or encountered an error), and B_FALSE if it should continue looking.
304  *    If B_TRUE, donecodep contains the reason: HCI1394_IXL_INTR_DMALOST,
305  *    HCI1394_IXL_INTR_DMASTOP, HCI1394_IXL_INTR_NOADV, or
306  *    HCI1394_IXL_INTR_NOERROR.  NOERROR means that the current location
307  *    has been determined and no further searching is needed.
308  */
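/*
 * Informal summary of how hci1394_ixl_intr_check_done()'s result is used
 * below (derived from the code; see the function body for details):
 *    IXL_CHECK_LOST - recheck the descriptor status once; if still lost,
 *                     return B_TRUE with HCI1394_IXL_INTR_DMALOST
 *    IXL_CHECK_STOP - return B_TRUE with HCI1394_IXL_INTR_DMASTOP
 *    IXL_CHECK_DONE - return B_TRUE with HCI1394_IXL_INTR_NOERROR, or
 *                     HCI1394_IXL_INTR_NOADV if the no-advance interrupt
 *                     budget (rem_noadv_intrs) is exhausted
 *    IXL_CHECK_SKIP - reload cmd/depth/timestamp from the context and
 *                     continue scanning
 */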
309 static boolean_t
310 hci1394_ixl_intr_check_xfer(hci1394_state_t *soft_statep,
311     hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlp,
312     ixl1394_command_t **ixlnextpp, uint16_t *timestampp, int *donecodep)
313 {
314 	uint_t		    dma_advances;
315 	int		    intrstatus;
316 	uint_t		    skipped;
317 	hci1394_xfer_ctl_t  *xferctlp;
318 	uint16_t	    ixldepth;
319 	uint16_t	    ixlopcode;
320 
321 
322 	TNF_PROBE_0_DEBUG(hci1394_ixl_intr_check_xfer_enter,
323 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
324 
325 	*donecodep = 0;
326 	dma_advances = 0;
327 	ixldepth = ctxtp->ixl_exec_depth;
328 	ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;
329 
330 	/* get control struct for this xfer start IXL command */
331 	xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
332 
333 	skipped = 0;
334 	while ((skipped == 0) && (ixldepth < xferctlp->cnt)) {
335 		/*
336 		 * check if status is set in dma descriptor
337 		 * block at cur depth in cur xfer start IXL cmd
338 		 */
339 		if (hci1394_ixl_check_status(&xferctlp->dma[ixldepth],
340 		    ixlopcode, timestampp, B_TRUE) != 0) {
341 
342 			/* advance depth to next desc block in cur IXL cmd */
343 			ixldepth++;
344 
345 			/*
346 			 * count dma desc blks whose status was set
347 			 * (i.e. advanced to next dma desc)
348 			 */
349 			dma_advances++;
350 			continue;
351 		}
352 
353 		/* if get to here, status is not set */
354 
355 		/*
356 		 * cur IXL cmd dma desc status not set.  save the cur IXL cmd,
357 		 * depth, and last timestamp for next time.
358 		 */
359 		ctxtp->ixl_execp = ixlp;
360 		ctxtp->ixl_exec_depth = ixldepth;
361 		ctxtp->dma_last_time = *timestampp;
362 
363 		/*
364 		 * check if dma descriptor processing location is indeterminate
365 		 * (lost), or the context has stopped, is done, or has skipped
366 		 */
367 		intrstatus = hci1394_ixl_intr_check_done(soft_statep, ctxtp);
368 		if (intrstatus == IXL_CHECK_LOST) {
369 			/*
370 			 * location indeterminate, try once more to determine
371 			 * current state.  First, recheck if status has become
372 			 * set in cur dma descriptor block.  (don't reset status
373 			 * here if is set)
374 			 */
375 			if (hci1394_ixl_check_status(&xferctlp->dma[ixldepth],
376 			    ixlopcode, timestampp, 1) != B_TRUE) {
377 				/* resume from where we left off */
378 				skipped = 0;
379 				continue;
380 			}
381 
382 			/*
383 			 * status not set, check intr processing
384 			 * completion status again
385 			 */
386 			if ((intrstatus = hci1394_ixl_intr_check_done(
387 				soft_statep, ctxtp)) == IXL_CHECK_LOST) {
388 				/*
389 				 * location still indeterminate,
390 				 * processing is lost
391 				 */
392 				*donecodep = HCI1394_IXL_INTR_DMALOST;
393 
394 				TNF_PROBE_1_DEBUG(
395 				    hci1394_ixl_intr_check_xfer_exit,
396 				    HCI1394_TNF_HAL_STACK_ISOCH, "",
397 				    tnf_string, msg, "INTR_DMALOST");
398 				return (B_TRUE);
399 			}
400 		}
401 
402 		/*
403 		 * if dma processing stopped, the current location has been
404 		 * determined.
405 		 */
406 		if (intrstatus == IXL_CHECK_STOP) {
407 			/*
408 			 * save timestamp, clear currently executing IXL
409 			 * command and depth. return stopped.
410 			 */
411 			ctxtp->ixl_execp = NULL;
412 			ctxtp->ixl_exec_depth = 0;
413 			ctxtp->dma_last_time = *timestampp;
414 			ctxtp->rem_noadv_intrs = 0;
415 
416 			*donecodep = HCI1394_IXL_INTR_DMASTOP;
417 
418 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_xfer_exit,
419 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
420 			    "INTR_DMASTOP");
421 			return (B_TRUE);
422 		}
423 
424 		/*
425 		 * dma processing done for now. current location has
426 		 * been determined
427 		 */
428 		if (intrstatus == IXL_CHECK_DONE) {
429 			/*
430 			 * if in update processing call:
431 			 *    clear update processing flag & return ok.
432 			 *    if dma advances happened, reset to max allowed.
433 			 *    however, if none have, don't reduce remaining
434 			 *    amount - that's for real interrupt call to adjust.
435 			 */
436 			if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
437 
438 				if (dma_advances > 0) {
439 					ctxtp->rem_noadv_intrs =
440 					    ctxtp->max_noadv_intrs;
441 				}
442 
443 				*donecodep = HCI1394_IXL_INTR_NOERROR;
444 
445 				TNF_PROBE_1_DEBUG(
446 				    hci1394_ixl_intr_check_xfer_exit,
447 				    HCI1394_TNF_HAL_STACK_ISOCH, "",
448 				    tnf_string, msg, "INTR_NOERROR");
449 				return (B_TRUE);
450 			}
451 
452 			/*
453 		 * else, not in update call processing; this is a normal
454 			 * intr call.  if no dma statuses were found set
455 			 * (i.e. no dma advances), reduce remaining count of
456 			 * interrupts allowed with no I/O completions
457 			 */
458 			if (dma_advances == 0) {
459 				ctxtp->rem_noadv_intrs--;
460 			} else {
461 				/*
462 				 * else some dma statuses were found set.
463 				 * reinit remaining count of interrupts allowed
464 				 * with no I/O completions
465 				 */
466 				ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;
467 			}
468 
469 			/*
470 			 * if no remaining count of interrupts allowed with no
471 			 * I/O completions, return failure (no dma advance after
472 			 * max retries), else return ok
473 			 */
474 			if (ctxtp->rem_noadv_intrs == 0) {
475 				*donecodep = HCI1394_IXL_INTR_NOADV;
476 
477 				TNF_PROBE_1_DEBUG(
478 				    hci1394_ixl_intr_check_xfer_exit,
479 				    HCI1394_TNF_HAL_STACK_ISOCH, "",
480 				    tnf_string, msg, "INTR_NOADV");
481 				return (B_TRUE);
482 			}
483 
484 			*donecodep = HCI1394_IXL_INTR_NOERROR;
485 
486 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_xfer_exit,
487 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
488 			    "INTR_NOERROR2");
489 			return (B_TRUE);
490 		}
491 
492 		/*
493 		 * else (intrstatus == IXL_CHECK_SKIP) indicating skip has
494 		 * occurred, retrieve current IXL cmd, depth, and timestamp and
495 		 * continue interrupt processing
496 		 */
497 		skipped = 1;
498 		*ixlnextpp = ctxtp->ixl_execp;
499 		ixldepth = ctxtp->ixl_exec_depth;
500 		*timestampp = ctxtp->dma_last_time;
501 
502 		/*
503 		 * also count the intervening skip to the next posted
504 		 * dma descriptor as one advance.
505 		 */
506 		dma_advances++;
507 	}
508 
509 	/*
510 	 * if full depth reached at current IXL cmd, set back to start for next
511 	 * IXL xfer command that will be processed
512 	 */
513 	if ((skipped == 0) && (ixldepth >= xferctlp->cnt)) {
514 		ctxtp->ixl_exec_depth = 0;
515 	}
516 
517 	/*
518 	 * make sure rem_noadv_intrs is reset to max if we advanced.
519 	 */
520 	if (dma_advances > 0) {
521 		ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;
522 	}
523 
524 	TNF_PROBE_0_DEBUG(hci1394_ixl_intr_check_xfer_exit,
525 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
526 
527 	/* continue to process next IXL command */
528 	return (B_FALSE);
529 }
530 
531 /*
532  * hci1394_ixl_intr_check_done()
533  *    checks if context has stopped, or if able to match hardware location
534  *    with an expected IXL program location.
535  */
536 static int
537 hci1394_ixl_intr_check_done(hci1394_state_t *soft_statep,
538     hci1394_iso_ctxt_t *ctxtp)
539 {
540 	ixl1394_command_t   *ixlp;
541 	hci1394_xfer_ctl_t  *xferctlp;
542 	uint_t		    ixldepth;
543 	hci1394_xfer_ctl_dma_t *dma;
544 	ddi_acc_handle_t    acc_hdl;
545 	ddi_dma_handle_t    dma_hdl;
546 	uint32_t	    desc_status;
547 	hci1394_desc_t	    *hcidescp;
548 	off_t		    hcidesc_off;
549 	int		    err;
550 	uint32_t	    dma_cmd_cur_loc;
551 	uint32_t	    dma_cmd_last_loc;
552 	uint32_t	    dma_loc_check_enabled;
553 	uint32_t	    dmastartp;
554 	uint32_t	    dmaendp;
555 
556 	uint_t		    rem_dma_skips;
557 	uint16_t	    skipmode;
558 	uint16_t	    skipdepth;
559 	ixl1394_command_t   *skipdestp;
560 	ixl1394_command_t   *skipxferp;
561 
562 	TNF_PROBE_0_DEBUG(hci1394_ixl_intr_check_done_enter,
563 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
564 
565 	/*
566 	 * start looking through the IXL list from the xfer start command where
567 	 * we last left off (for composite opcodes, need to start from the
568 	 * appropriate depth).
569 	 */
570 
571 	ixlp = ctxtp->ixl_execp;
572 	ixldepth = ctxtp->ixl_exec_depth;
573 
574 	/* control struct for xfer start IXL command */
575 	xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
576 	dma = &xferctlp->dma[ixldepth];
577 
578 	/* determine if dma location checking is enabled */
579 	if ((dma_loc_check_enabled =
580 	    (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_CMDREG)) != 0) {
581 
582 		/* if so, get current dma command location */
583 		dma_cmd_last_loc = 0xFFFFFFFF;
584 
585 		while ((dma_cmd_cur_loc = HCI1394_ISOCH_CTXT_CMD_PTR(
586 		    soft_statep, ctxtp)) != dma_cmd_last_loc) {
587 
588 			/* retry get until location register stabilizes */
589 			dma_cmd_last_loc = dma_cmd_cur_loc;
590 		}
591 	}
592 
593 	/*
594 	 * compare the (bound) address of the DMA descriptor corresponding to
595 	 * the current xfer IXL command against the current value in the
596 	 * DMA location register.  If it exists and matches, then
597 	 *    if context stopped, return stopped, else return done.
598 	 *
599 	 * The dma start address is the first address of the descriptor block.
600 	 * Since "Z" is a count of 16-byte descriptors in the block, calculate
601 	 * the end address by adding Z*16 to the start addr.
602 	 */
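	/*
	 * Worked example with an illustrative value (assumes DESC_Z_MASK
	 * selects the low-order Z bits): if dma_bound were 0x00123403, then
	 * Z == 3, dmastartp == 0x00123400 and dmaendp == 0x00123400 +
	 * (3 * 16) == 0x00123430, i.e. the block spans three 16-byte
	 * descriptors starting at the bound address.
	 */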
603 	dmastartp = dma->dma_bound & ~DESC_Z_MASK;
604 	dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);
605 
606 	if (dma_loc_check_enabled &&
607 	    ((dma_cmd_cur_loc >= dmastartp) && (dma_cmd_cur_loc < dmaendp))) {
608 
609 		if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
610 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
611 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
612 			    "CHECK_STOP");
613 			return (IXL_CHECK_STOP);
614 		}
615 
616 		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
617 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
618 		    "CHECK_DONE");
619 		return (IXL_CHECK_DONE);
620 	}
621 
622 	/*
623 	 * if receive mode:
624 	 */
625 	if ((ixlp->ixl_opcode & IXL1394_OPF_ONXMIT) == 0)  {
626 		/*
627 		 * if context stopped, return stopped, else,
628 		 * if there is no current dma location reg, return done
629 		 * else return location indeterminate
630 		 */
631 		if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
632 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
633 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
634 			    "CHECK_STOP");
635 			return (IXL_CHECK_STOP);
636 		}
637 		if (!dma_loc_check_enabled) {
638 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
639 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
640 			    "CHECK_DONE");
641 			return (IXL_CHECK_DONE);
642 		}
643 
644 		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
645 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
646 		    "CHECK_LOST");
647 		return (IXL_CHECK_LOST);
648 	}
649 
650 	/*
651 	 * else is xmit mode:
652 	 * check status of current xfer IXL command's dma descriptor
653 	 */
654 	acc_hdl  = dma->dma_buf->bi_handle;
655 	dma_hdl  = dma->dma_buf->bi_dma_handle;
656 	hcidescp = (hci1394_desc_t *)dma->dma_descp;
657 	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
658 
659 	/* Sync the descriptor before we get the status */
660 	err = ddi_dma_sync(dma_hdl, hcidesc_off, sizeof (hci1394_desc_t),
661 	    DDI_DMA_SYNC_FORCPU);
662 	if (err != DDI_SUCCESS) {
663 		TNF_PROBE_1(hci1394_ixl_intr_check_done_error,
664 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
665 		    "dma_sync() failed");
666 	}
667 	desc_status = ddi_get32(acc_hdl, &hcidescp->status);
668 
669 	if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
670 
671 		/*
672 		 * if status is now set here, return skipped, to cause calling
673 		 * function to continue, even though location hasn't changed
674 		 */
675 		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
676 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
677 		    "CHECK_SKIP");
678 		return (IXL_CHECK_SKIP);
679 	}
680 
681 	/*
682 	 * At this point, we have gotten to a DMA descriptor with an empty
683 	 * status.  This is not enough information however to determine that
684 	 * we've found all processed DMA descriptors because during cycle-lost
685 	 * conditions, the HW will skip over some descriptors without writing
686 	 * status.  So we have to look ahead until we're convinced that the HW
687 	 * hasn't jumped ahead.
688 	 *
689 	 * Follow the IXL skip-to links until one is found whose status is set,
690 	 * until the dma location register (if any) matches an xfer IXL
691 	 * command's dma location, or until max_dma_skips IXL commands have
692 	 * been examined.
693 	 */
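	/*
	 * Skip-mode handling in brief (the switch below is authoritative):
	 *    IXL1394_SKIP_TO_SELF  - no forward target; resolve to
	 *                            stop/done/lost immediately
	 *    IXL1394_SKIP_TO_NEXT  - examine the next depth of this command,
	 *                            or the next xfer command at full depth
	 *    IXL1394_SKIP_TO_LABEL - examine the label's xfer command at
	 *                            depth 0
	 *    IXL1394_SKIP_TO_STOP  - no skip target at all; resolve like
	 *                            SKIP_TO_SELF
	 */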
694 	rem_dma_skips = ctxtp->max_dma_skips;
695 
696 	while (rem_dma_skips-- > 0) {
697 
698 		/*
699 		 * get either IXL command specific or
700 		 * system default skipmode info
701 		 */
702 		skipdepth = 0;
703 		if (xferctlp->skipmodep != NULL) {
704 			skipmode  = xferctlp->skipmodep->skipmode;
705 			skipdestp = xferctlp->skipmodep->label;
706 			skipxferp = (ixl1394_command_t *)
707 			    xferctlp->skipmodep->compiler_privatep;
708 		} else {
709 			skipmode  = ctxtp->default_skipmode;
710 			skipdestp = ctxtp->default_skiplabelp;
711 			skipxferp = ctxtp->default_skipxferp;
712 		}
713 
714 		switch (skipmode) {
715 
716 		case IXL1394_SKIP_TO_SELF:
717 			/*
718 			 * mode is skip to self:
719 			 *   if context is stopped, return stopped, else
720 			 *   if dma location reg not enabled, return done
721 			 *   else, return location indeterminate
722 			 */
723 			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
724 			    0) {
725 				TNF_PROBE_1_DEBUG(
726 					hci1394_ixl_intr_check_done_exit,
727 					HCI1394_TNF_HAL_STACK_ISOCH, "",
728 					tnf_string, msg, "CHECK_STOP");
729 				return (IXL_CHECK_STOP);
730 			}
731 
732 			if (!dma_loc_check_enabled) {
733 				TNF_PROBE_1_DEBUG(
734 					hci1394_ixl_intr_check_done_exit,
735 					HCI1394_TNF_HAL_STACK_ISOCH, "",
736 					tnf_string, msg, "CHECK_DONE");
737 				return (IXL_CHECK_DONE);
738 			}
739 
740 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
741 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
742 			    "CHECK_LOST");
743 			return (IXL_CHECK_LOST);
744 
745 		case IXL1394_SKIP_TO_NEXT:
746 			/*
747 			 * mode is skip to next:
748 			 *    set potential skip target to current command at
749 			 *    next depth
750 			 */
751 			skipdestp = ixlp;
752 			skipxferp = ixlp;
753 			skipdepth = ixldepth + 1;
754 
755 			/*
756 			 * else if at max depth at current cmd adjust to next
757 			 * IXL command.
758 			 *
759 			 * (NOTE: next means next IXL command along execution
760 			 * path,  whatever IXL command it might be.  e.g. store
761 			 * timestamp or callback or label or jump or send... )
762 			 */
763 			if (skipdepth >= xferctlp->cnt) {
764 				skipdepth = 0;
765 				skipdestp = ixlp->next_ixlp;
766 				skipxferp = xferctlp->execp;
767 			}
768 
769 			/* evaluate skip to status further, below */
770 			break;
771 
772 
773 		case IXL1394_SKIP_TO_LABEL:
774 			/*
775 			 * mode is skip to label:
776 			 *    set skip destination depth to 0 (should be
777 			 *    redundant)
778 			 */
779 			skipdepth = 0;
780 
781 			/* evaluate skip to status further, below */
782 			break;
783 
784 		case IXL1394_SKIP_TO_STOP:
785 			/*
786 			 * mode is skip to stop:
787 			 *    set all xfer and destination skip to locations to
788 			 *    null
789 			 */
790 			skipxferp = NULL;
791 			skipdestp = NULL;
792 			skipdepth = 0;
793 
794 			/* evaluate skip to status further, below */
795 			break;
796 
797 		} /* end switch */
798 
799 		/*
800 		 * if no xfer IXL command follows at or after current skip-to
801 		 * location
802 		 */
803 		if (skipxferp == NULL) {
804 			/*
805 			 *   if context is stopped, return stopped, else
806 			 *   if dma location reg not enabled, return done
807 			 *   else, return location indeterminate
808 			 */
809 			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
810 			    0) {
811 				TNF_PROBE_1_DEBUG(
812 					hci1394_ixl_intr_check_done_exit,
813 					HCI1394_TNF_HAL_STACK_ISOCH, "",
814 					tnf_string, msg, "CHECK_STOP");
815 				return (IXL_CHECK_STOP);
816 			}
817 
818 			if (!dma_loc_check_enabled) {
819 				TNF_PROBE_1_DEBUG(
820 					hci1394_ixl_intr_check_done_exit,
821 					HCI1394_TNF_HAL_STACK_ISOCH, "",
822 					tnf_string, msg, "CHECK_DONE");
823 				return (IXL_CHECK_DONE);
824 			}
825 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
826 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
827 			    "CHECK_LOST");
828 			return (IXL_CHECK_LOST);
829 		}
830 
831 		/*
832 		 * if the skip to xfer IXL dma descriptor's status is set,
833 		 * then execution did skip
834 		 */
835 		xferctlp = (hci1394_xfer_ctl_t *)skipxferp->compiler_privatep;
836 		dma	 = &xferctlp->dma[skipdepth];
837 		acc_hdl  = dma->dma_buf->bi_handle;
838 		dma_hdl  = dma->dma_buf->bi_dma_handle;
839 		hcidescp = (hci1394_desc_t *)dma->dma_descp;
840 		hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
841 
842 		/* Sync the descriptor before we get the status */
843 		err = ddi_dma_sync(dma_hdl, hcidesc_off,
844 		    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
845 		if (err != DDI_SUCCESS) {
846 			TNF_PROBE_1(hci1394_ixl_intr_check_done_error,
847 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
848 			    "dma_sync() failed");
849 		}
850 		desc_status = ddi_get32(acc_hdl, &hcidescp->status);
851 
852 		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
853 
854 			/*
855 			 * adjust to continue from skip to IXL command and
856 			 * return skipped, to have calling func continue.
857 			 * (Note: next IXL command may be any allowed IXL
858 			 * command)
859 			 */
860 			ctxtp->ixl_execp = skipdestp;
861 			ctxtp->ixl_exec_depth = skipdepth;
862 
863 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
864 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
865 			    "CHECK_SKIP");
866 			return (IXL_CHECK_SKIP);
867 		}
868 
869 		/*
870 		 * if dma location command register checking is enabled,
871 		 * and the skip to xfer IXL dma location matches current
872 		 * dma location register value, execution did skip
873 		 */
874 		dmastartp = dma->dma_bound & ~DESC_Z_MASK;
875 		dmaendp = dmastartp + ((dma->dma_bound & DESC_Z_MASK) << 4);
876 
877 		if (dma_loc_check_enabled && ((dma_cmd_cur_loc >= dmastartp) &&
878 		    (dma_cmd_cur_loc < dmaendp))) {
879 
880 			/* if the context is stopped, return stopped */
881 			if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) ==
882 			    0) {
883 				TNF_PROBE_1_DEBUG(
884 					hci1394_ixl_intr_check_done_exit,
885 					HCI1394_TNF_HAL_STACK_ISOCH, "",
886 					tnf_string, msg, "CHECK STOP");
887 				return (IXL_CHECK_STOP);
888 			}
889 			/*
890 			 * adjust to continue from skip to IXL command and
891 			 * return skipped, to have calling func continue
892 			 * (Note: next IXL command may be any allowed IXL cmd)
893 			 */
894 			ctxtp->ixl_execp = skipdestp;
895 			ctxtp->ixl_exec_depth = skipdepth;
896 
897 			TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
898 			    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
899 			    "CHECK_SKIP");
900 			return (IXL_CHECK_SKIP);
901 		}
902 
903 		/*
904 		 * else, advance working current locn to skipxferp and
905 		 * skipdepth and continue skip evaluation loop processing
906 		 */
907 		ixlp = skipxferp;
908 		ixldepth = skipdepth;
909 
910 	} /* end while */
911 
912 	/*
913 	 * didn't find dma status set, nor location reg match, along skip path
914 	 *
915 	 * if context is stopped, return stopped,
916 	 *
917 	 * else if no current location reg active don't change context values,
918 	 * just return done (no skip)
919 	 *
920 	 * else, return location indeterminate
921 	 */
922 
923 	if (HCI1394_ISOCH_CTXT_ACTIVE(soft_statep, ctxtp) == 0) {
924 		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
925 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
926 		    "CHECK_STOP");
927 		return (IXL_CHECK_STOP);
928 	}
929 	if (!dma_loc_check_enabled) {
930 		TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
931 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
932 		    "CHECK_DONE");
933 		return (IXL_CHECK_DONE);
934 	}
935 
936 	TNF_PROBE_1_DEBUG(hci1394_ixl_intr_check_done_exit,
937 	    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg, "CHECK_LOST");
938 	return (IXL_CHECK_LOST);
939 }
940 
941 /*
942  * hci1394_isoch_cycle_inconsistent()
943  *    Called during interrupt notification to indicate that the cycle time
944  *    has changed unexpectedly.  We need to take this opportunity to
945  *    update our tracking of each running transmit context's execution.
946  *    cycle_inconsistent only affects transmit, so recv contexts are left alone.
947  */
948 void
949 hci1394_isoch_cycle_inconsistent(hci1394_state_t *soft_statep)
950 {
951 	int i, cnt_thresh;
952 	boolean_t note;
953 	hrtime_t current_time, last_time, delta, delta_thresh;
954 	hci1394_iso_ctxt_t *ctxtp; 	/* current context */
955 
956 	ASSERT(soft_statep);
957 	TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_inconsistent_enter,
958 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
959 
960 	hci1394_ohci_intr_clear(soft_statep->ohci, OHCI_INTR_CYC_INCONSISTENT);
961 
962 	/* grab the mutex before checking each context's INUSE and RUNNING */
963 	mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
964 
965 	/* check for transmit contexts which are inuse and running */
966 	for (i = 0; i < soft_statep->isoch->ctxt_xmit_count; i++) {
967 		ctxtp = &soft_statep->isoch->ctxt_xmit[i];
968 
969 		if ((ctxtp->ctxt_flags &
970 		    (HCI1394_ISO_CTXT_INUSE | HCI1394_ISO_CTXT_RUNNING)) != 0) {
971 
972 			mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
973 			hci1394_ixl_interrupt(soft_statep, ctxtp, B_FALSE);
974 			mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
975 		}
976 	}
977 
978 	/*
979 	 * get the current time and calculate the delta between now and
980 	 * when the last interrupt was processed.  (NOTE: if the time
981 	 * returned by gethrtime() rolls-over while we are counting these
982 	 * interrupts, we will incorrectly restart the counting process.
983 	 * However, because the probability of this happening is small and
984 	 * not catching the roll-over will AT MOST double the time it takes
985 	 * us to discover and recover from this condition, we can safely
986 	 * ignore it.)
987 	 */
988 	current_time = gethrtime();
989 	last_time = soft_statep->isoch->cycle_incon_thresh.last_intr_time;
990 	delta = current_time - last_time;
991 
992 	/*
993 	 * compare the calculated delta to the delta T threshold.  If it
994 	 * is less than the threshold, then increment the counter.  If it
995 	 * is not then reset the counter.
996 	 */
997 	delta_thresh = soft_statep->isoch->cycle_incon_thresh.delta_t_thresh;
998 	if (delta < delta_thresh)
999 		soft_statep->isoch->cycle_incon_thresh.delta_t_counter++;
1000 	else
1001 		soft_statep->isoch->cycle_incon_thresh.delta_t_counter = 0;
1002 
1003 	/*
1004 	 * compare the counter to the counter threshold.  If it is greater,
1005 	 * then disable the cycle inconsistent interrupt.
1006 	 */
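	/*
	 * Illustrative example only (the actual thresholds come from
	 * cycle_incon_thresh and are configured elsewhere): with a
	 * delta_t_thresh of 10ms and a counter_thresh of 10, eleven
	 * consecutive CYC_INCONSISTENT interrupts each arriving less than
	 * 10ms after the previous one would disable the interrupt below.
	 */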
1007 	cnt_thresh = soft_statep->isoch->cycle_incon_thresh.counter_thresh;
1008 	note = B_FALSE;
1009 	if (soft_statep->isoch->cycle_incon_thresh.delta_t_counter >
1010 	    cnt_thresh) {
1011 		hci1394_ohci_intr_disable(soft_statep->ohci,
1012 		    OHCI_INTR_CYC_INCONSISTENT);
1013 		note = B_TRUE;
1014 	}
1015 
1016 	/* save away the current time into the last_intr_time field */
1017 	soft_statep->isoch->cycle_incon_thresh.last_intr_time = current_time;
1018 
1019 	mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
1020 
1021 	if (note == B_TRUE) {
1022 		cmn_err(CE_NOTE, "!hci1394(%d): cycle_inconsistent interrupt "
1023 		    "disabled until next bus reset",
1024 		    soft_statep->drvinfo.di_instance);
1025 		TNF_PROBE_1(hci1394_isoch_cycle_inconsistent_error,
1026 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, msg,
1027 		    "CYCLE_INCONSISTENT intr disabled until next bus reset");
1028 	}
1029 
1030 	TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_inconsistent_exit,
1031 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1032 }
1033 
1034 
1035 /*
1036  * hci1394_isoch_cycle_lost()
1037  *    Interrupt indicates an expected cycle_start packet (and therefore our
1038  *    opportunity to transmit) did not show up.  Update our tracking of each
1039  *    running transmit context.
1040  */
1041 void
1042 hci1394_isoch_cycle_lost(hci1394_state_t *soft_statep)
1043 {
1044 	int i, cnt_thresh;
1045 	boolean_t note;
1046 	hrtime_t current_time, last_time, delta, delta_thresh;
1047 	hci1394_iso_ctxt_t *ctxtp; 	/* current context */
1048 
1049 	ASSERT(soft_statep);
1050 	TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_lost_enter,
1051 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1052 
1053 	hci1394_ohci_intr_clear(soft_statep->ohci, OHCI_INTR_CYC_LOST);
1054 
1055 	/* grab the mutex before checking each context's INUSE and RUNNING */
1056 	mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
1057 
1058 	/* check for transmit contexts which are inuse and running */
1059 	for (i = 0; i < soft_statep->isoch->ctxt_xmit_count; i++) {
1060 		ctxtp = &soft_statep->isoch->ctxt_xmit[i];
1061 
1062 		if ((ctxtp->ctxt_flags &
1063 		    (HCI1394_ISO_CTXT_INUSE | HCI1394_ISO_CTXT_RUNNING)) != 0) {
1064 
1065 			mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
1066 			hci1394_ixl_interrupt(soft_statep, ctxtp, B_FALSE);
1067 			mutex_enter(&soft_statep->isoch->ctxt_list_mutex);
1068 		}
1069 	}
1070 
1071 	/*
1072 	 * get the current time and calculate the delta between now and
1073 	 * when the last interrupt was processed.  (NOTE: if the time
1074 	 * returned by gethrtime() rolls-over while we are counting these
1075 	 * interrupts, we will incorrectly restart the counting process.
1076 	 * However, because the probability of this happening is small and
1077 	 * not catching the roll-over will AT MOST double the time it takes
1078 	 * us to discover and recover from this condition, we can safely
1079 	 * ignore it.)
1080 	 */
1081 	current_time = gethrtime();
1082 	last_time = soft_statep->isoch->cycle_lost_thresh.last_intr_time;
1083 	delta = current_time - last_time;
1084 
1085 	/*
1086 	 * compare the calculated delta to the delta T threshold.  If it
1087 	 * is less than the threshold, then increment the counter.  If it
1088 	 * is not then reset the counter.
1089 	 */
1090 	delta_thresh = soft_statep->isoch->cycle_lost_thresh.delta_t_thresh;
1091 	if (delta < delta_thresh)
1092 		soft_statep->isoch->cycle_lost_thresh.delta_t_counter++;
1093 	else
1094 		soft_statep->isoch->cycle_lost_thresh.delta_t_counter = 0;
1095 
1096 	/*
1097 	 * compare the counter to the counter threshold.  If it is greater,
1098 	 * then disable the cycle lost interrupt.
1099 	 */
1100 	cnt_thresh = soft_statep->isoch->cycle_lost_thresh.counter_thresh;
1101 	note = B_FALSE;
1102 	if (soft_statep->isoch->cycle_lost_thresh.delta_t_counter >
1103 	    cnt_thresh) {
1104 		hci1394_ohci_intr_disable(soft_statep->ohci,
1105 		    OHCI_INTR_CYC_LOST);
1106 		note = B_TRUE;
1107 	}
1108 
1109 	/* save away the current time into the last_intr_time field */
1110 	soft_statep->isoch->cycle_lost_thresh.last_intr_time = current_time;
1111 
1112 	mutex_exit(&soft_statep->isoch->ctxt_list_mutex);
1113 
1114 	if (note == B_TRUE) {
1115 		cmn_err(CE_NOTE, "!hci1394(%d): cycle_lost interrupt "
1116 		    "disabled until next bus reset",
1117 		    soft_statep->drvinfo.di_instance);
1118 		TNF_PROBE_1(hci1394_isoch_cycle_lost_error,
1119 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, msg,
1120 		    "CYCLE_LOST intr disabled until next bus reset");
1121 	}
1122 
1123 	TNF_PROBE_0_DEBUG(hci1394_isoch_cycle_lost_exit,
1124 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1125 }
1126