/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * hci1394_ixl_update.c
 *    Isochronous IXL update routines.
 *    Routines used to dynamically update a compiled and presumably running
 *    IXL program.
 */

#include <sys/kmem.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/disp.h>

#include <sys/tnf_probe.h>

#include <sys/1394/h1394.h>
#include <sys/1394/ixl1394.h>	/* IXL opcodes & data structs */

#include <sys/1394/adapters/hci1394.h>


/* local defines for hci1394_ixl_update_prepare return codes */
#define	IXL_PREP_READY	    1
#define	IXL_PREP_SUCCESS    0
#define	IXL_PREP_FAILURE    (-1)
/*
 * the number of times update will retry waiting for the interrupt
 * routine to complete before giving up.
 */
int hci1394_upd_retries_before_fail = 50;
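
/*
 * A sketch, assuming the standard /etc/system mechanism for overriding
 * driver globals at boot (value and syntax illustrative only):
 *
 *	set hci1394:hci1394_upd_retries_before_fail = 100
 */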

/* IXL runtime update static functions */
static int hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp);
static void hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp);
static int hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp);

/*
 *	IXL commands and included fields which can be updated
 * IXL1394_OP_CALLBACK:		callback(), callback_data
 * IXL1394_OP_JUMP:		label
 * IXL1394_OP_RECV_PKT		ixl_buf, size, mem_bufp
 * IXL1394_OP_RECV_PKT_ST	ixl_buf, size, mem_bufp
 * IXL1394_OP_RECV_BUF(ppb)	ixl_buf, size, pkt_size, mem_bufp, buf_offset
 * IXL1394_OP_RECV_BUF(fill)	ixl_buf, size, pkt_size, mem_bufp, buf_offset
 * IXL1394_OP_SEND_PKT		ixl_buf, size, mem_bufp
 * IXL1394_OP_SEND_PKT_ST	ixl_buf, size, mem_bufp
 * IXL1394_OP_SEND_PKT_WHDR_ST	ixl_buf, size, mem_bufp
 * IXL1394_OP_SEND_BUF		ixl_buf, size, pkt_size, mem_bufp, buf_offset
 * IXL1394_OP_SET_TAGSYNC	tag, sync
 * IXL1394_OP_SET_SKIPMODE	skipmode, label
 *
 *	IXL commands which can not be updated
 * IXL1394_OP_LABEL
 * IXL1394_OP_SEND_HDR_ONLY
 * IXL1394_OP_SEND_NO_PKT
 * IXL1394_OP_STORE_VALUE
 * IXL1394_OP_STORE_TIMESTAMP
 * IXL1394_OP_SET_SYNCWAIT
 */
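
/*
 * Illustrative call sequence (a sketch, not code from this driver): a
 * target driver builds a replacement IXL command with the same opcode as
 * the original and hands both old and new commands to hci1394_ixl_update().
 * The variable names below are hypothetical.
 *
 *	ixl1394_jump_t new_jump = *old_jump;	(copy, then retarget)
 *	new_jump.label = new_label_ixlp;
 *	if (hci1394_ixl_update(soft_statep, ctxtp,
 *	    (ixl1394_command_t *)&new_jump,
 *	    (ixl1394_command_t *)old_jump, 0, &result) != DDI_SUCCESS) {
 *		result holds an IXL1394_E* status, e.g.
 *		IXL1394_EUPDATE_DISALLOWED
 *	}
 */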

/*
 * hci1394_ixl_update
 *    main entrypoint into dynamic update code: initializes temporary
 *    update variables, evaluates request, coordinates with potentially
 *    simultaneous run of interrupt stack, evaluates likelihood of success,
 *    performs the update, checks if completed, performs cleanup
 *    resulting from coordination with interrupt stack.
 */
int
hci1394_ixl_update(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
    ixl1394_command_t *ixlnewp, ixl1394_command_t *ixloldp,
    uint_t riskoverride, int *resultp)
{
	hci1394_ixl_update_vars_t uv;	/* update work variables structure */
	int prepstatus;
	int ret;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");


	/* save caller specified values in update work variables structure */
	uv.soft_statep = soft_statep;
	uv.ctxtp = ctxtp;
	uv.ixlnewp = ixlnewp;
	uv.ixloldp = ixloldp;
	uv.risklevel = riskoverride;

	/* initialize remainder of update work variables */
	uv.ixlxferp = NULL;
	uv.skipxferp = NULL;
	uv.skipmode = 0;
	uv.skipaddr = 0;
	uv.jumpaddr = 0;
	uv.pkthdr1 = 0;
	uv.pkthdr2 = 0;
	uv.bufaddr = 0;
	uv.bufsize = 0;
	uv.ixl_opcode = uv.ixlnewp->ixl_opcode;
	uv.hcihdr = 0;
	uv.hcistatus = 0;
	uv.hci_offset = 0;
	uv.hdr_offset = 0;

	/* set done ok return status */
	uv.upd_status = 0;

	/* evaluate request and prepare to perform update */
	prepstatus = hci1394_ixl_update_prepare(&uv);
	if (prepstatus != IXL_PREP_READY) {
		/*
		 * if either done or nothing to do or an evaluation error,
		 * return update status
		 */
		*resultp = uv.upd_status;

		/* if prep evaluation error, return failure */
		if (prepstatus != IXL_PREP_SUCCESS) {
			TNF_PROBE_1_DEBUG(hci1394_ixl_update_error,
			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, msg,
			    "IXL_PREP_FAILURE");
			TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (DDI_FAILURE);
		}
		/* if no action or update done, return update successful */
		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (DDI_SUCCESS);
	}

	/* perform update processing reservation of interrupt context */
	ret = hci1394_ixl_update_enable(&uv);
	if (ret != DDI_SUCCESS) {

		/* error acquiring control of context - return */
		*resultp = uv.upd_status;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (DDI_FAILURE);
	}

	/* perform update risk analysis */
	if (hci1394_ixl_update_analysis(&uv) != DDI_SUCCESS) {
		/*
		 * return, if excessive risk or dma execution processing lost
		 * (note: caller risk override not yet implemented)
		 */

		/* attempt intr processing cleanup, unless err is dmalost */
		if (uv.upd_status != IXL1394_EPRE_UPD_DMALOST) {
			(void) hci1394_ixl_update_endup(&uv);
		} else {
			/*
			 * error is dmalost, just release interrupt context.
			 * take the lock here to ensure an atomic read, modify,
			 * write of the "intr_flags" field while we try to
			 * clear the "in update" flag.  protects from the
			 * interrupt routine.
			 */
			mutex_enter(&ctxtp->intrprocmutex);
			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
			mutex_exit(&ctxtp->intrprocmutex);
		}
		*resultp = uv.upd_status;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (DDI_FAILURE);
	}


	/* perform requested update */
	if (hci1394_ixl_update_perform(&uv) != DDI_SUCCESS) {
		/*
		 * if non-completion condition, return update status
		 * attempt interrupt processing cleanup first
		 */
		(void) hci1394_ixl_update_endup(&uv);

		*resultp = uv.upd_status;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (DDI_FAILURE);
	}

	/* evaluate update completion, setting completion status */
	if (hci1394_ixl_update_evaluate(&uv) != DDI_SUCCESS) {
		/*
		 * update failed - bad, just release interrupt context.
		 * take the lock here too (just like above) to ensure an
		 * atomic read, modify, write of the "intr_flags" field
		 * while we try to clear the "in update" flag.  protects
		 * from the interrupt routine.
		 */
		mutex_enter(&ctxtp->intrprocmutex);
		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
		mutex_exit(&ctxtp->intrprocmutex);

		/* if DMA stopped or lost, formally stop context */
		if (uv.upd_status == HCI1394_IXL_INTR_DMASTOP) {
			hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
			    ID1394_DONE);
		} else if (uv.upd_status == HCI1394_IXL_INTR_DMALOST) {
			hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
			    ID1394_FAIL);
		}

		*resultp = uv.upd_status;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (DDI_FAILURE);
	}

	/* perform interrupt processing cleanup */
	uv.upd_status = hci1394_ixl_update_endup(&uv);

	/* return update completion status */
	*resultp = uv.upd_status;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_exit, HCI1394_TNF_HAL_STACK_ISOCH,
	    "");
	return (DDI_SUCCESS);
}

/*
 * hci1394_ixl_update_enable
 *	Used to coordinate dynamic update activities with simultaneous
 *	interrupt handler processing, while holding the context mutex
 *	for as short a time as possible.
 */
static int
hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp)
{
	int	status;
	boolean_t retry;
	uint_t	remretries;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_enable_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	retry = B_TRUE;
	/* set arbitrary number of retries before giving up */
	remretries = hci1394_upd_retries_before_fail;
	status = DDI_SUCCESS;

	/*
	 * if we waited for completion of an interrupt-generated callback,
	 * retry here
	 */
	ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
	mutex_enter(&uvp->ctxtp->intrprocmutex);

	while (retry == B_TRUE) {
		retry = B_FALSE;
		remretries--;

		/* failure if update processing is already in progress */
		if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
			uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
			status = DDI_FAILURE;
		} else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
			/*
			 * if have retried max number of times or if this update
			 * request is on the interrupt stack, which means that
			 * the callback function of the target driver initiated
			 * the update, set update failure.
			 */
			if ((remretries <= 0) ||
			    (servicing_interrupt())) {
				uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
				status = DDI_FAILURE;
			} else {
				/*
				 * if not on interrupt stack and retries not
				 * exhausted, free mutex, wait a short time
				 * and then retry.
				 */
				retry = B_TRUE;
				mutex_exit(&uvp->ctxtp->intrprocmutex);
				drv_usecwait(1);
				mutex_enter(&uvp->ctxtp->intrprocmutex);
				continue;
			}
		} else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
			status = DDI_FAILURE;
		}
	}

	/* if context is available, reserve it for this update request */
	if (status == DDI_SUCCESS) {
		uvp->ctxtp->intr_flags |= HCI1394_ISO_CTXT_INUPDATE;
	}

	ASSERT(MUTEX_HELD(&uvp->ctxtp->intrprocmutex));
	mutex_exit(&uvp->ctxtp->intrprocmutex);

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_enable_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (status);
}
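
/*
 * A minimal sketch of the intr_flags handshake implemented by
 * update_enable()/update_endup() against the interrupt routine (the flag
 * names are real; the timeline is illustrative):
 *
 *	update:	sets INUPDATE (above), patches descriptors
 *	intr:	finds INUPDATE set, sets INTRSET and defers its dma sync
 *	update:	endup() sees INTRSET, clears it and runs the deferred
 *		hci1394_ixl_dma_sync() on the interrupt routine's behalf,
 *		then clears INUPDATE
 */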

/*
 * hci1394_ixl_update_endup()
 *    The ending stage of coordinating with simultaneously running interrupts.
 *    Perform interrupt processing sync tasks if we (update) had blocked the
 *    interrupt out when it wanted a turn.
 */
static int
hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp)
{
	uint_t status;
	hci1394_iso_ctxt_t *ctxtp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_endup_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	status = HCI1394_IXL_INTR_NOERROR;
	ctxtp = uvp->ctxtp;

	while (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {

		if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET) {
			/*
			 * We don't need to grab the lock here because
			 * the "intr_flags" field is only modified in two
			 * ways - one in UPDATE and one in INTR routine. Since
			 * we know that it can't be modified simultaneously
			 * in another UPDATE thread - that is assured by the
			 * checks in "update_enable" - we would only be trying
			 * to protect against the INTR thread.  And since we
			 * are going to clear a bit here (and check it again
			 * at the top of the loop) we are not really concerned
			 * about missing its being set by the INTR routine.
			 */
			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;

			status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
			if (status == HCI1394_IXL_INTR_DMALOST) {
				/*
				 * Unlike above, we do care here as we are
				 * trying to clear the "in update" flag, and
				 * we don't want that lost because the INTR
				 * routine is trying to set its flag.
				 */
				mutex_enter(&uvp->ctxtp->intrprocmutex);
				ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
				mutex_exit(&uvp->ctxtp->intrprocmutex);
				continue;
			}
		}

		ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
		mutex_enter(&uvp->ctxtp->intrprocmutex);
		if (!(ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET)) {
			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
		}
		mutex_exit(&uvp->ctxtp->intrprocmutex);
	}

	/* if DMA stopped or lost, formally stop context */
	if (status == HCI1394_IXL_INTR_DMASTOP) {
		hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_DONE);
	} else if (status == HCI1394_IXL_INTR_DMALOST) {
		hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
	}

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_endup_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (status);
}

/*
 * hci1394_ixl_update_prepare()
 *    Preparation for the actual update (using temp uvp struct)
 */
static int
hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp)
{
	int		    ret;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	/* both new and old ixl commands must be the same */
	if (uvp->ixlnewp->ixl_opcode != uvp->ixloldp->ixl_opcode) {

		uvp->upd_status = IXL1394_EOPCODE_MISMATCH;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
		    "EOPCODE_MISMATCH");
		return (IXL_PREP_FAILURE);
	}

	/*
	 * perform evaluation and prepare update based on specific
	 * IXL command type
	 */
	switch (uvp->ixl_opcode) {

	case IXL1394_OP_CALLBACK_U: {
		ixl1394_callback_t *old_callback_ixlp;
		ixl1394_callback_t *new_callback_ixlp;

		old_callback_ixlp = (ixl1394_callback_t *)uvp->ixloldp;
		new_callback_ixlp = (ixl1394_callback_t *)uvp->ixlnewp;

		/* perform update now without further evaluation */
		old_callback_ixlp->callback_arg =
		    new_callback_ixlp->callback_arg;
		old_callback_ixlp->callback = new_callback_ixlp->callback;

		/* nothing else to do, return with done ok status */
		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_SUCCESS);
	}

	case IXL1394_OP_JUMP_U:
		ret = hci1394_ixl_update_prep_jump(uvp);

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (ret);

	case IXL1394_OP_SET_SKIPMODE_U:
		ret = hci1394_ixl_update_prep_set_skipmode(uvp);

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (ret);

	case IXL1394_OP_SET_TAGSYNC_U:
		ret = hci1394_ixl_update_prep_set_tagsync(uvp);

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (ret);

	case IXL1394_OP_RECV_PKT_U:
	case IXL1394_OP_RECV_PKT_ST_U:
		ret = hci1394_ixl_update_prep_recv_pkt(uvp);

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (ret);

	case IXL1394_OP_RECV_BUF_U:
		ret = hci1394_ixl_update_prep_recv_buf(uvp);

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (ret);

	case IXL1394_OP_SEND_PKT_U:
	case IXL1394_OP_SEND_PKT_ST_U:
	case IXL1394_OP_SEND_PKT_WHDR_ST_U:
		ret = hci1394_ixl_update_prep_send_pkt(uvp);

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (ret);

	case IXL1394_OP_SEND_BUF_U:
		ret = hci1394_ixl_update_prep_send_buf(uvp);

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (ret);

	default:
		/* ixl command being updated must be one of above, else error */
		uvp->upd_status = IXL1394_EOPCODE_DISALLOWED;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}
}

/*
 * hci1394_ixl_update_prep_jump()
 *    Preparation for update of an IXL1394_OP_JUMP_U command.
 */
static int
hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_jump_t	    *old_jump_ixlp;
	ixl1394_jump_t	    *new_jump_ixlp;
	ixl1394_command_t   *ixlp;
	hci1394_xfer_ctl_t  *xferctlp;
	hci1394_desc_t	    *hcidescp;
	uint_t		    cbcnt;
	ddi_acc_handle_t    acc_hdl;
	ddi_dma_handle_t    dma_hdl;
	uint32_t	    desc_hdr;
	int		    err;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
	new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;

	/* check if any change between new and old ixl jump command */
	if (new_jump_ixlp->label == old_jump_ixlp->label) {

		/* if none, return with done ok status */
		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_SUCCESS);
	}

	/* new ixl jump command label must be ptr to valid ixl label or NULL */
	if ((new_jump_ixlp->label != NULL) &&
	    (new_jump_ixlp->label->ixl_opcode != IXL1394_OP_LABEL)) {

		/* if not jumping to label, return an error */
		uvp->upd_status = IXL1394_EJUMP_NOT_TO_LABEL;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prepare_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
		    "EJUMP_NOT_TO_LABEL");
		return (IXL_PREP_FAILURE);
	}

	/*
	 * follow exec path from new ixl jump command label to determine new
	 * jump destination ixl xfer command
	 */
	(void) hci1394_ixl_find_next_exec_xfer(new_jump_ixlp->label, &cbcnt,
	    &ixlp);
	if (ixlp != NULL) {
		/*
		 * get the bound address of the first descriptor block reached
		 * by the jump destination.  (This descriptor is the first
		 * transfer command following the jumped-to label.)  Set the
		 * descriptor's address (with Z bits) into jumpaddr.
		 */
		uvp->jumpaddr = ((hci1394_xfer_ctl_t *)
		    ixlp->compiler_privatep)->dma[0].dma_bound;
	}

	/*
	 * get associated xfer IXL command from compiler_privatep of old
	 * jump command
	 */
	if ((uvp->ixlxferp = (ixl1394_command_t *)
	    old_jump_ixlp->compiler_privatep) == NULL) {

		/* if none, return an error */
		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_jump_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
		    "EORIG_IXL_CORRUPTED");
		return (IXL_PREP_FAILURE);
	}

	/*
	 * get the last descriptor of the last dma descriptor block of the
	 * associated IXL xfer command, then get hcihdr from its hdr field,
	 * removing the interrupt-enable bits
	 */
	xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep;
	hcidescp = (hci1394_desc_t *)xferctlp->dma[xferctlp->cnt - 1].dma_descp;
	acc_hdl  = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
	dma_hdl  = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;

	/* Sync the descriptor before we grab the header(s) */
	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
	    DDI_DMA_SYNC_FORCPU);
	if (err != DDI_SUCCESS) {
		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_jump_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
		    "EINTERNAL_ERROR: dma_sync() failed");
		return (IXL_PREP_FAILURE);
	}

	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
	uvp->hcihdr = desc_hdr & ~DESC_INTR_ENBL;

	/* set depth to last dma descriptor block & update count to 1 */
	uvp->ixldepth = xferctlp->cnt - 1;
	uvp->ixlcount = 1;

	/*
	 * if there is only one dma descriptor block and IXL xfer command
	 * inited by a label or have found callbacks along the exec path to the
	 * new destination IXL xfer command, enable interrupt in hcihdr value
	 */
	if (((xferctlp->cnt == 1) &&
	    ((xferctlp->ctl_flags & XCTL_LABELLED) != 0)) || (cbcnt != 0)) {

		uvp->hcihdr |= DESC_INTR_ENBL;
	}

	/* If either old or new destination was/is NULL, enable interrupt */
	if ((new_jump_ixlp->label == NULL) || (old_jump_ixlp->label == NULL)) {
		uvp->hcihdr |= DESC_INTR_ENBL;
	}
	/*
	 * if xfer type is xmit and skip mode for this xfer command is
	 * IXL1394_SKIP_TO_NEXT then set uvp->skipmode to IXL1394_SKIP_TO_NEXT,
	 * set uvp->skipaddr to uvp->jumpaddr and set uvp->hci_offset to the
	 * offset from the last dma descriptor to the first dma descriptor
	 * (where skipaddr goes).
	 *
	 * update perform processing will have to set the skip branch address
	 * to the same location as the jump destination in this case.
	 */
	uvp->skipmode = IXL1394_SKIP_TO_STOP;
	if ((uvp->ixlxferp->ixl_opcode & IXL1394_OPF_ONXMIT) != 0) {

		if ((xferctlp->skipmodep && (((ixl1394_set_skipmode_t *)
		    xferctlp->skipmodep)->skipmode == IXL1394_SKIP_TO_NEXT)) ||
		    (uvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_NEXT)) {

			uvp->skipmode = IXL1394_SKIP_TO_NEXT;
			uvp->skipaddr = uvp->jumpaddr;

			/*
			 * calc hci_offset to first descriptor (where skipaddr
			 * goes) of dma descriptor block from current (last)
			 * descriptor of the descriptor block (accessed in
			 * xfer_ctl dma_descp of IXL xfer command)
			 */
			if (uvp->ixlxferp->ixl_opcode ==
			    IXL1394_OP_SEND_HDR_ONLY) {
				/*
				 * send header only is (Z bits - 2)
				 * descriptor components back from last one
				 */
				uvp->hci_offset -= 2;
			} else {
				/*
				 * all others are (Z bits - 1) descriptor
				 * components back from last component
				 */
				uvp->hci_offset -= 1;
			}
		}
	}
	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_jump_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (IXL_PREP_READY);
}

/*
 * hci1394_ixl_update_prep_set_skipmode()
 *    Preparation for update of an IXL1394_OP_SET_SKIPMODE_U command.
 */
static int
hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_set_skipmode_t	*old_set_skipmode_ixlp;
	ixl1394_set_skipmode_t	*new_set_skipmode_ixlp;
	ixl1394_command_t	*ixlp;
	hci1394_xfer_ctl_t	*xferctlp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_skipmode_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
	new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;

	/* check if the new set skipmode is a change from the old one */
	if (new_set_skipmode_ixlp->skipmode ==
	    old_set_skipmode_ixlp->skipmode) {

		if ((new_set_skipmode_ixlp->skipmode !=
		    IXL1394_SKIP_TO_LABEL) ||
		    (old_set_skipmode_ixlp->label ==
		    new_set_skipmode_ixlp->label)) {

			TNF_PROBE_0_DEBUG(
				hci1394_ixl_update_prep_set_skipmode_exit,
				HCI1394_TNF_HAL_STACK_ISOCH, "");

			/* No change, return with done ok status */
			return (IXL_PREP_SUCCESS);
		}
	}

	/* find associated ixl xfer command by following old ixl links */
	uvp->ixlxferp = uvp->ixloldp->next_ixlp;
	while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
	    IXL1394_OPF_ISXFER) == 0) ||
	    ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {

		uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
	}

	/* return an error if no ixl xfer command found */
	if (uvp->ixlxferp == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string,
		    errmsg, "EORIG_IXL_CORRUPTED");
		return (IXL_PREP_FAILURE);
	}

	/*
	 * get Z bits (number of descriptor components in descriptor block)
	 * from a dma bound addr in the xfer_ctl struct of the IXL xfer command
	 */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
		    "EORIG_IXL_CORRUPTED");
		return (IXL_PREP_FAILURE);
	}
	uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;

	/*
	 * determine hci_offset to first component (where skipaddr goes) of
	 * dma descriptor block from current (last) descriptor component of
	 * descriptor block (accessed in xfer_ctl dma_descp of IXL xfer command)
	 */
	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
		/*
		 * "send header only" is (Z bits - 2) descriptors back
		 * from last one
		 */
		uvp->hci_offset -= 2;
	} else {
		/*
		 * all others are (Z bits - 1) descriptors back from
		 * last descriptor.
		 */
		uvp->hci_offset -= 1;
	}
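
	/*
	 * Worked example (illustrative): if dma_bound carries Z = 3, the
	 * block has three descriptor components and hci_offset becomes
	 * 3 - 1 = 2, i.e. the first component (which receives skipaddr)
	 * sits two components before the last one that dma_descp points to.
	 */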

	/* set depth to zero and count to update all dma descriptors */
	uvp->ixldepth = 0;
	uvp->ixlcount = xferctlp->cnt;

	/* set new skipmode and validate */
	uvp->skipmode = new_set_skipmode_ixlp->skipmode;

	if ((uvp->skipmode != IXL1394_SKIP_TO_NEXT) &&
	    (uvp->skipmode != IXL1394_SKIP_TO_SELF) &&
	    (uvp->skipmode != IXL1394_SKIP_TO_STOP) &&
	    (uvp->skipmode != IXL1394_SKIP_TO_LABEL)) {

		/* return an error if invalid mode */
		uvp->upd_status = IXL1394_EBAD_SKIPMODE;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string,
		    errmsg, "EBAD_SKIPMODE");
		return (IXL_PREP_FAILURE);
	}

	/* if mode is skip to label */
	if (uvp->skipmode == IXL1394_SKIP_TO_LABEL) {

		/* verify label field is valid ixl label cmd */
		if ((new_set_skipmode_ixlp->label == NULL) ||
		    (new_set_skipmode_ixlp->label->ixl_opcode !=
			IXL1394_OP_LABEL)) {

			/* Error - not skipping to valid label */
			uvp->upd_status = IXL1394_EBAD_SKIP_LABEL;

			TNF_PROBE_0_DEBUG(
				hci1394_ixl_update_prep_set_skipmode_exit,
				HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (IXL_PREP_FAILURE);
		}

		/*
		 * follow new skip exec path after label to next xfer
		 * IXL command
		 */
		(void) hci1394_ixl_find_next_exec_xfer(
			new_set_skipmode_ixlp->label, NULL, &ixlp);

		/*
		 * set the skip destination IXL xfer command; update_perform()
		 * later stores it into the old set skipmode command's
		 * compiler_privatep
		 */
		if ((uvp->skipxferp = ixlp) != NULL) {
			/*
			 * set skipaddr to be the first dma descriptor block's
			 * dma bound address w/Z bits
			 */
			xferctlp = (hci1394_xfer_ctl_t *)
			    ixlp->compiler_privatep;
			uvp->skipaddr = xferctlp->dma[0].dma_bound;
		}
	}

	/*
	 * if mode is skip to next, get skipaddr for last dma descriptor block
	 */
	if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
		/* follow normal exec path to next xfer ixl command */
		(void) hci1394_ixl_find_next_exec_xfer(uvp->ixlxferp->next_ixlp,
		    NULL, &ixlp);

		/*
		 * get skip_next destination IXL xfer command
		 * (for last iteration)
		 */
		if (ixlp != NULL) {
			/*
			 * set skipaddr to first dma descriptor block's
			 * dma bound address w/Z bits
			 */
			xferctlp = (hci1394_xfer_ctl_t *)
			    ixlp->compiler_privatep;
			uvp->skipaddr = xferctlp->dma[0].dma_bound;
		}
	}
	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_skipmode_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (IXL_PREP_READY);
}

/*
 * hci1394_ixl_update_prep_set_tagsync()
 *    Preparation for update of an IXL1394_OP_SET_TAGSYNC_U command.
 */
static int
hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_set_tagsync_t	*old_set_tagsync_ixlp;
	ixl1394_set_tagsync_t	*new_set_tagsync_ixlp;
	hci1394_xfer_ctl_t	*xferctlp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
	new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;

	/* check if the new set tagsync is a change from the old one */
	if ((new_set_tagsync_ixlp->tag == old_set_tagsync_ixlp->tag) &&
	    (new_set_tagsync_ixlp->sync == old_set_tagsync_ixlp->sync)) {

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");

		/* no change, return with done ok status */
		return (IXL_PREP_SUCCESS);
	}

	/* find associated IXL xfer command by following old ixl links */
	uvp->ixlxferp = uvp->ixloldp->next_ixlp;
	while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
	    IXL1394_OPF_ISXFER) == 0) ||
	    ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {

		uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
	}

	/* return an error if no IXL xfer command found */
	if (uvp->ixlxferp == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* is IXL xfer command an IXL1394_OP_SEND_NO_PKT? */
	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_NO_PKT) {
		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");

		/* no update needed, return done ok status */
		return (IXL_PREP_SUCCESS);
	}

	/* build new pkthdr1 from new IXL tag/sync bits */
	uvp->pkthdr1 = (uvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
	    (new_set_tagsync_ixlp->tag << DESC_PKT_TAG_SHIFT) |
	    (uvp->ctxtp->isochan << DESC_PKT_CHAN_SHIFT) |
	    (new_set_tagsync_ixlp->sync << DESC_PKT_SY_SHIFT);
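
	/*
	 * Note the split of sources above: speed and channel come from the
	 * running context (ctxtp), while only tag and sync come from the new
	 * IXL command -- an update may retarget tag/sync but never moves the
	 * stream to another channel or speed.
	 */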

	/*
	 * get Z bits (# of descriptor components in descriptor block) from
	 * any dma bound address in the xfer_ctl struct of the IXL xfer cmd
	 */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}
	uvp->hdr_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;

	/*
	 * determine hdr_offset from the current(last) descriptor of the
	 * DMA descriptor block to the descriptor where pkthdr1 goes
	 * by examining IXL xfer command
	 */
	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
		/*
		 * if IXL send header only, the current (last)
		 * descriptor is the one
		 */
		uvp->hdr_offset = 0;
	} else {
		/*
		 * all others are the first descriptor (Z bits - 1)
		 * back from the last
		 */
		uvp->hdr_offset -= 1;
	}

	/* set depth to zero and count to update all dma descriptors */
	uvp->ixldepth = 0;
	uvp->ixlcount = xferctlp->cnt;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_set_tagsync_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (IXL_PREP_READY);
}

/*
 * hci1394_ixl_update_prep_recv_pkt()
 *    Preparation for update of an IXL1394_OP_RECV_PKT_U or
 *    IXL1394_OP_RECV_PKT_ST_U command.
 */
static int
hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
	ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
	hci1394_xfer_ctl_t *xferctlp;
	hci1394_desc_t	   *hcidescp;
	ddi_acc_handle_t   acc_hdl;
	ddi_dma_handle_t   dma_hdl;
	uint32_t	   desc_hdr;
	int		   err;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
	new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;

	/* check if any change between new and old IXL xfer commands */
	if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
	    (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
	    (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");

		/* no change. return with done ok status */
		return (IXL_PREP_SUCCESS);
	}

	/* if new IXL buffer addrs are null, return error */
	if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == NULL) ||
	    (new_xfer_pkt_ixlp->mem_bufp == NULL)) {

		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* if IXL xfer command is not xfer start command */
	if (uvp->ixl_opcode == IXL1394_OP_RECV_PKT_U) {
		/*
		 * find IXL xfer start command in the compiler_privatep of the
		 * old IXL xfer command
		 */
		uvp->ixlxferp = (ixl1394_command_t *)
		    uvp->ixloldp->compiler_privatep;

		if (uvp->ixlxferp == NULL) {

			/* Error - no IXL xfer start command found */
			uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

			TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (IXL_PREP_FAILURE);
		}
	} else {
		/* IXL xfer command is the IXL xfer start command */
		uvp->ixlxferp = uvp->ixloldp;
	}

	/* check that xfer_ctl is present in the IXL xfer start command */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		/* Error - no xfer_ctl struct found */
		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* set depth to zero and count to 1 to update dma descriptor */
	uvp->ixldepth = 0;
	uvp->ixlcount = 1;

	/*
	 * get Z bits (number of descriptors in descriptor block) from the DMA
	 * bound address in the xfer_ctl struct of the IXL xfer start command.
	 */
	uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;

	/*
	 * set offset from the current(last) descriptor to the descriptor for
	 * this packet command
	 */
	uvp->hci_offset -= (1 + uvp->ixloldp->compiler_resv);

	/*
	 * set bufsize to the new IXL xfer size, and bufaddr to the new
	 * IXL xfer bufp
	 */
	uvp->bufsize = ((ixl1394_xfer_pkt_t *)uvp->ixlnewp)->size;
	uvp->bufaddr = ((ixl1394_xfer_pkt_t *)
	    uvp->ixlnewp)->ixl_buf.ixldmac_addr;

	/*
	 * update old hcihdr w/new bufsize, set hcistatus rescnt to
	 * new bufsize
	 */
	hcidescp = (hci1394_desc_t *)xferctlp->dma[0].dma_descp -
	    uvp->hci_offset;
	acc_hdl  = xferctlp->dma[0].dma_buf->bi_handle;
	dma_hdl  = xferctlp->dma[0].dma_buf->bi_dma_handle;

	/* Sync the descriptor before we grab the header(s) */
	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
	    DDI_DMA_SYNC_FORCPU);
	if (err != DDI_SUCCESS) {
		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
		    "EINTERNAL_ERROR: dma_sync() failed");
		return (IXL_PREP_FAILURE);
	}

	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
	uvp->hcihdr = desc_hdr;
	uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
	uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
	    DESC_HDR_REQCOUNT_MASK;
	uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
	    DESC_ST_RESCOUNT_MASK;
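
	/*
	 * reqCount is re-written with the new buffer size and, since the
	 * controller decrements resCount as it stores received data (OHCI
	 * behavior, noted here for context), resCount is re-primed to the
	 * same value so the descriptor again appears empty at its new size.
	 */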

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_pkt_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (IXL_PREP_READY);
}

/*
 * hci1394_ixl_update_prep_recv_buf()
 *    Preparation for update of an IXL1394_OP_RECV_BUF_U command.
 */
static int
hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
	ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
	hci1394_xfer_ctl_t *xferctlp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
	new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;

	/* check if any change between new and old IXL xfer commands */
	if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
	    (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {

		if (((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) != 0) ||
		    (new_xfer_buf_ixlp->pkt_size ==
			old_xfer_buf_ixlp->pkt_size)) {

			TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");

			/* no change. return with done ok status */
			return (IXL_PREP_SUCCESS);
		}
	}

	/* if new IXL buffer addrs are null, return error */
	if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == NULL) ||
	    (new_xfer_buf_ixlp->mem_bufp == NULL)) {

		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/*
	 * if not buffer fill mode, check that the new pkt_size > 0 and
	 * new size/pkt_size doesn't change the count of dma descriptor
	 * blocks required
	 */
	if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
		if ((new_xfer_buf_ixlp->pkt_size == 0) ||
		    ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
		    (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {

			/* count changes. return an error */
			uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;

			TNF_PROBE_0_DEBUG(
				hci1394_ixl_update_prep_recv_buf_exit,
				HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (IXL_PREP_FAILURE);
		}
	}

	/* set old IXL xfer command as the current IXL xfer command */
	uvp->ixlxferp = uvp->ixloldp;

	/* check that the xfer_ctl struct is present in IXL xfer command */
	if ((xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep)
	    == NULL) {

		/* return an error if no xfer_ctl struct is found for command */
		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* set depth to zero and count to update all dma descriptors */
	uvp->ixldepth = 0;
	uvp->ixlcount = xferctlp->cnt;

	/* set bufsize to new pkt_size (or to new size if buffer fill mode) */
	if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
		uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
	} else {
		uvp->bufsize = new_xfer_buf_ixlp->size;
	}
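
	/*
	 * Example (illustrative): a 4000-byte buffer with pkt_size = 1000 in
	 * packet-per-buffer mode is four packets, so bufsize = 1000 is
	 * applied per descriptor block; in buffer-fill mode the controller
	 * packs packets back to back, so bufsize is the full 4000.
	 */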

	/* set bufaddr to new ixl_buf */
	uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;

	/* set hcihdr reqcnt and hcistatus rescnt to new bufsize */
	uvp->hci_offset = 0;
	uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
	    DESC_HDR_REQCOUNT_MASK;
	uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
	    DESC_ST_RESCOUNT_MASK;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_recv_buf_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (IXL_PREP_READY);
}

/*
 * hci1394_ixl_update_prep_send_pkt()
 *    Preparation for update of an IXL1394_OP_SEND_PKT_U command,
 *    IXL1394_OP_SEND_PKT_ST_U command and IXL1394_OP_SEND_PKT_WHDR_ST_U
 *    command.
 */
static int
hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
	ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
	hci1394_xfer_ctl_t *xferctlp;
	hci1394_desc_imm_t *hcidescp;
	ddi_acc_handle_t   acc_hdl;
	ddi_dma_handle_t   dma_hdl;
	uint32_t	   desc_hdr, desc_hdr2;
	int		   err;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
	new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;

	/* check if any change between new and old IXL xfer commands */
	if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
	    (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
	    (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");

		/* if none, return with done ok status */
		return (IXL_PREP_SUCCESS);
	}

	/* if new ixl buffer addrs are null, return error */
	if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == NULL) ||
	    (new_xfer_pkt_ixlp->mem_bufp == NULL)) {

		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* error if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode and size < 4 */
	if ((uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) &&
	    (new_xfer_pkt_ixlp->size < 4)) {

		uvp->upd_status = IXL1394_EPKT_HDR_MISSING;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* if IXL xfer command is not an IXL xfer start command */
	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_U) {
		/*
		 * find IXL xfer start command in the compiler_privatep of the
		 * old IXL xfer command
		 */
		uvp->ixlxferp = (ixl1394_command_t *)
		    old_xfer_pkt_ixlp->compiler_privatep;

		if (uvp->ixlxferp == NULL) {
			/* error if no IXL xfer start command found */
			uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

			TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
				HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (IXL_PREP_FAILURE);
		}
	} else {
		/* IXL xfer command is the IXL xfer start command */
		uvp->ixlxferp = uvp->ixloldp;
	}

	/*
	 * get Z bits (number of descriptor components in the descriptor block)
	 * from a dma bound address in the xfer_ctl structure of the IXL
	 * xfer start command
	 */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* set depth to zero and count to 1 to update dma descriptor */
	uvp->ixldepth = 0;
	uvp->ixlcount = 1;

	/*
	 * set offset to the header(first) descriptor from the
	 * current(last) descriptor
	 */
	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;

	/*
	 * set offset from the current(last) descriptor to the descriptor for
	 * this packet command
	 */
	uvp->hci_offset = uvp->hdr_offset - 2 - uvp->ixloldp->compiler_resv;

	/* set bufsize to new pkt buffer size, set bufaddr to new bufp */
	uvp->bufsize = new_xfer_pkt_ixlp->size;
	uvp->bufaddr = new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;

	/*
	 * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff,
	 * step over hdr
	 */
	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
		uvp->bufsize -= 4;
		uvp->bufaddr += 4;
	}
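
	/*
	 * Example (illustrative): for SEND_PKT_WHDR_ST a 512-byte buffer
	 * whose first quadlet holds the 1394 packet header leaves
	 * bufsize = 508 payload bytes starting at bufaddr + 4.
	 */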

	/* update old hcihdr w/new bufsize */
	hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
	    uvp->hci_offset;
	acc_hdl  = xferctlp->dma[0].dma_buf->bi_handle;
	dma_hdl  = xferctlp->dma[0].dma_buf->bi_dma_handle;

	/* Sync the descriptor before we grab the header(s) */
	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
	    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
	if (err != DDI_SUCCESS) {
		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		TNF_PROBE_1_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, errmsg,
		    "EINTERNAL_ERROR: dma_sync() failed");
		return (IXL_PREP_FAILURE);
	}

	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
	uvp->hcihdr = desc_hdr;
	uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
	uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
	    DESC_HDR_REQCOUNT_MASK;

	/* update old pkthdr2 w/new bufsize. error if 16-bit dataLen overflows */
	desc_hdr2 = ddi_get32(acc_hdl, &hcidescp->q2);
	uvp->pkthdr2 = desc_hdr2;
	uvp->pkthdr2 = (uvp->pkthdr2 & DESC_PKT_DATALEN_MASK) >>
	    DESC_PKT_DATALEN_SHIFT;
	uvp->pkthdr2 -= old_xfer_pkt_ixlp->size;
	uvp->pkthdr2 += uvp->bufsize;

	if (uvp->pkthdr2 > 0xFFFF) {
		uvp->upd_status = IXL1394_EPKTSIZE_MAX_OFLO;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}
	uvp->pkthdr2 = (uvp->pkthdr2 << DESC_PKT_DATALEN_SHIFT) &
	    DESC_PKT_DATALEN_MASK;
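
	/*
	 * Example (illustrative): if the old send size was 1000 and the new
	 * size is 1200, the dataLength value just computed grows by 200; the
	 * 0xFFFF test above rejects any result that no longer fits the
	 * 16-bit field.
	 */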

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_pkt_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (IXL_PREP_READY);
}

/*
 * hci1394_ixl_update_prep_send_buf()
 *    Preparation for update of an IXL1394_OP_SEND_BUF_U command.
 */
static int
hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp)
{
	ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
	ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
	hci1394_xfer_ctl_t *xferctlp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
	new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;

	/* check if any change between new and old IXL xfer commands */
	if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
	    (new_xfer_buf_ixlp->pkt_size == old_xfer_buf_ixlp->pkt_size) &&
	    (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");

		/* no change, return with done ok status */
		return (IXL_PREP_SUCCESS);
	}

	/* if new IXL buffer addresses are null, return error */
	if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == NULL) ||
	    (new_xfer_buf_ixlp->mem_bufp == NULL)) {

		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/*
	 * check that the new pkt_size > 0 and the new size/pkt_size
	 * doesn't change the count of DMA descriptor blocks required
	 */
	if ((new_xfer_buf_ixlp->pkt_size == 0) ||
	    ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
	    (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {

		/* Error - new has different pkt count than old */
		uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* set the old IXL xfer command as the current IXL xfer command */
	uvp->ixlxferp = uvp->ixloldp;

	/*
	 * get Z bits (number of descriptor components in descriptor block)
	 * from a DMA bound address in the xfer_ctl struct of the
	 * IXL xfer command
	 */
	if ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL) {

		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (IXL_PREP_FAILURE);
	}

	/* set depth to zero and count to update all dma descriptors */
	uvp->ixldepth = 0;
	uvp->ixlcount = xferctlp->cnt;

	/*
	 * set offset to the header(first) descriptor from the current (last)
	 * descriptor.
	 */
	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;

	/* set offset to the only(last) xfer descriptor */
	uvp->hci_offset = 0;

	/* set bufsize to the new pkt_size, set bufaddr to the new bufp */
	uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
	uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;

	/*
	 * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff,
	 * step over header (a quadlet)
	 */
	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
		uvp->bufsize -= 4;
		uvp->bufaddr += 4;
	}

	/* set hcihdr to new bufsize */
	uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
	    DESC_HDR_REQCOUNT_MASK;

	/* set pkthdr2 to new bufsize */
	uvp->pkthdr2 = (uvp->bufsize << DESC_PKT_DATALEN_SHIFT) &
	    DESC_PKT_DATALEN_MASK;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_prep_send_buf_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (IXL_PREP_READY);
}

/*
 * hci1394_ixl_update_perform()
 *    performs the actual update into DMA memory.
 */
static int
hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp)
{
	int			ii;
	uint_t			skipaddrlast;
	hci1394_xfer_ctl_t	*xferctlp;
	hci1394_desc_imm_t	*hcidescp;
	hci1394_iso_ctxt_t	*ctxtp;
	ddi_acc_handle_t	acc_hdl;
	ddi_dma_handle_t	dma_hdl;
	int			err;

	TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	ctxtp = uvp->ctxtp;

	/*
	 * if no target ixl xfer command to be updated or it has
	 * no xfer_ctl struct, then internal error.
	 */
	if ((uvp->ixlxferp == NULL) ||
	    ((xferctlp = (hci1394_xfer_ctl_t *)
	    uvp->ixlxferp->compiler_privatep) == NULL)) {

		uvp->upd_status = IXL1394_EINTERNAL_ERROR;

		TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");

		return (DDI_FAILURE);
	}

	/* perform update based on specific ixl command type */
	switch (uvp->ixl_opcode) {

	case IXL1394_OP_JUMP_U: {
		ixl1394_jump_t *old_jump_ixlp;
		ixl1394_jump_t *new_jump_ixlp;

		old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
		new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;

		/*
		 * set new hdr and new branch fields into last component of last
		 * dma descriptor block of ixl xfer cmd associated with
		 * ixl jump cmd
		 */
		hcidescp = (hci1394_desc_imm_t *)
		    xferctlp->dma[xferctlp->cnt - 1].dma_descp;
		acc_hdl	 = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
		dma_hdl	 =
		    xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;

		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
		ddi_put32(acc_hdl, &hcidescp->branch, uvp->jumpaddr);

		/*
		 * if xfer type is send and skip mode is IXL1394_SKIP_TO_NEXT
		 * also set branch location into branch field of first
		 * component (skip to address) of last dma descriptor block
		 */
		if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
			hcidescp -= uvp->hci_offset;
			ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
		}

		/* Sync descriptor for device (desc was modified) */
		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
		if (err != DDI_SUCCESS) {
			uvp->upd_status = IXL1394_EINTERNAL_ERROR;

			TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (DDI_FAILURE);
		}

		/* set old ixl jump cmd label from new ixl jump cmd label */
		old_jump_ixlp->label = new_jump_ixlp->label;
		break;
	}
	case IXL1394_OP_SET_SKIPMODE_U: {
		ixl1394_set_skipmode_t *old_set_skipmode_ixlp;
		ixl1394_set_skipmode_t *new_set_skipmode_ixlp;

		old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
		new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;

		/*
		 * if skip to next mode, save skip addr for last iteration
		 * thru dma descriptor blocks for associated ixl xfer command
		 */
		if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
			skipaddrlast = uvp->skipaddr;
		}

1613 		/*
1614 		 * iterate through the set of dma descriptor blocks for the
1615 		 * associated ixl xfer start cmd, setting the new skip address
1616 		 * into the first hci descriptor of each. for skip-to-next or
1617 		 * skip-to-self, first determine the address in each iteration
1618 		 */
1619 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1620 			hcidescp = (hci1394_desc_imm_t *)
1621 			    xferctlp->dma[ii].dma_descp - uvp->hci_offset;
1622 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1623 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1624 
1625 			if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
1626 				if (ii < (xferctlp->cnt - 1)) {
1627 					uvp->skipaddr =
1628 					    xferctlp->dma[ii + 1].dma_bound;
1629 				} else {
1630 					uvp->skipaddr = skipaddrlast;
1631 				}
1632 			} else if (uvp->skipmode == IXL1394_SKIP_TO_SELF) {
1633 				uvp->skipaddr = xferctlp->dma[ii].dma_bound;
1634 			}
1635 
1636 			ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
1637 
1638 			/* Sync descriptor for device (desc was modified) */
1639 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1640 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1641 			if (err != DDI_SUCCESS) {
1642 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1643 
1644 				TNF_PROBE_0_DEBUG(
1645 				    hci1394_ixl_update_perform_exit,
1646 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1647 				return (DDI_FAILURE);
1648 			}
1649 		}
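
		/*
		 * A worked example of the loop above, assuming cnt == 3 with
		 * descriptor blocks A, B and C:
		 *
		 *	skip-to-next: A branches to B's bound address, B to
		 *	C's, and C to skipaddrlast (resolved by the prep step
		 *	for whatever follows this xfer command).
		 *	skip-to-self: A, B and C each branch back to their
		 *	own bound address.
		 */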
1650 
1651 		/*
1652 		 * set old ixl set skip mode cmd mode and label from new ixl
1653 		 * cmd; set old ixl set skip mode cmd compiler_privatep to
1654 		 * uvp->skipxferp
1655 		 */
1656 		old_set_skipmode_ixlp->skipmode = uvp->skipmode;
1657 		old_set_skipmode_ixlp->label = new_set_skipmode_ixlp->label;
1658 		old_set_skipmode_ixlp->compiler_privatep =
1659 		    (ixl1394_priv_t)uvp->skipxferp;
1660 		break;
1661 	}
1662 	case IXL1394_OP_SET_TAGSYNC_U: {
1663 		ixl1394_set_tagsync_t *old_set_tagsync_ixlp;
1664 		ixl1394_set_tagsync_t *new_set_tagsync_ixlp;
1665 
1666 		old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
1667 		new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;
1668 
1669 		/*
1670 		 * iterate through set of descriptor blocks for associated IXL
1671 		 * xfer command and set new pkthdr1 value into output more/last
1672 		 * immediate hci descriptor (first/last hci descriptor of each
1673 		 * descriptor block)
1674 		 */
1675 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1676 			hcidescp = (hci1394_desc_imm_t *)
1677 			    xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
1678 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1679 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1680 			ddi_put32(acc_hdl, &hcidescp->q1, uvp->pkthdr1);
1681 
1682 			/* Sync descriptor for device (desc was modified) */
1683 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1684 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1685 			if (err != DDI_SUCCESS) {
1686 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1687 
1688 				TNF_PROBE_0_DEBUG(
1689 				    hci1394_ixl_update_perform_exit,
1690 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1691 				return (DDI_FAILURE);
1692 			}
1693 		}
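
		/*
		 * pkthdr1 is assumed (from the prep step) to carry the
		 * isochronous packet header quadlet rebuilt with the new tag
		 * and sync values; since q1 of the immediate descriptor is
		 * assumed to hold that first header quadlet, rewriting it in
		 * every block retags every packet this xfer command sends.
		 */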
1694 
1695 		/*
1696 		 * set old ixl set tagsync cmd tag & sync from new ixl set
1697 		 * tagsync cmd
1698 		 */
1699 		old_set_tagsync_ixlp->tag = new_set_tagsync_ixlp->tag;
1700 		old_set_tagsync_ixlp->sync = new_set_tagsync_ixlp->sync;
1701 		break;
1702 	}
1703 	case IXL1394_OP_RECV_PKT_U:
1704 	case IXL1394_OP_RECV_PKT_ST_U: {
1705 		ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
1706 		ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
1707 		uint32_t	   desc_status;
1708 
1709 		old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
1710 		new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
1711 
1712 		/*
1713 		 * alter buffer address, count and rescount in ixl recv pkt cmd
1714 		 * related hci component in dma descriptor block
1715 		 */
1716 		hcidescp = (hci1394_desc_imm_t *)
1717 		    xferctlp->dma[0].dma_descp - uvp->hci_offset;
1718 		acc_hdl	 = xferctlp->dma[0].dma_buf->bi_handle;
1719 		dma_hdl	 = xferctlp->dma[0].dma_buf->bi_dma_handle;
1720 		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
1721 		ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1722 
1723 		/* Sync the descriptor before we grab the status */
1724 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1725 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1726 		if (err != DDI_SUCCESS) {
1727 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1728 
1729 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1730 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1731 			return (DDI_FAILURE);
1732 		}
1733 
1734 		/* change only low 1/2 word and leave status bits unchanged */
1735 		desc_status = ddi_get32(acc_hdl, &hcidescp->status);
1736 		desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
1737 		    uvp->hcistatus;
1738 		ddi_put32(acc_hdl, &hcidescp->status, desc_status);
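
		/*
		 * A sketch of the read-modify-write above, assuming the
		 * OpenHCI layout of xferStatus in the upper 16 bits of the
		 * status quadlet and resCount in the lower 16: with a new
		 * residual count of 60 prepared into uvp->hcistatus,
		 *
		 *	desc_status = (desc_status & 0xffff0000) | 60;
		 *
		 * installing the new resCount while the hardware-owned
		 * xferStatus bits pass through untouched.
		 */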
1739 
1740 		/* Sync descriptor for device (desc was modified) */
1741 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1742 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1743 		if (err != DDI_SUCCESS) {
1744 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1745 
1746 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1747 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1748 			return (DDI_FAILURE);
1749 		}
1750 
1751 		/*
1752 		 * set old ixl recv pkt size and buffers from new
1753 		 * ixl recv pkt command
1754 		 */
1755 		old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
1756 		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
1757 		    new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
1758 		old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
1759 		break;
1760 	}
1761 	case IXL1394_OP_RECV_BUF_U: {
1762 		ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1763 		ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1764 		uint32_t	   desc_hdr;
1765 		uint32_t	   desc_status;
1766 
1767 		old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1768 		new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1769 
1770 		/*
1771 		 * iterate through set of descriptor blocks for this IXL xfer
1772 		 * command altering buffer, count and rescount in each
1773 		 * input more/last (the only) hci descriptor block descriptor.
1774 		 */
1775 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1776 
1777 			hcidescp = (hci1394_desc_imm_t *)
1778 			    xferctlp->dma[ii].dma_descp - uvp->hci_offset;
1779 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1780 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1781 
1782 			ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1783 
1784 			/*
1785 			 * advance to the next buffer segment (receives need
1786 			 * no header adjustment here)
1787 			 */
1788 			uvp->bufaddr += uvp->bufsize;
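
			/*
			 * Illustrative walk of the advance above (values
			 * assumed): with a per-block bufsize of 256 and
			 * cnt == 4, descriptor block ii receives
			 * bufaddr + (ii * 256), carving the new buffer into
			 * consecutive per-block segments.
			 */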
1789 
1790 			/* Sync the descriptor before we grab the header(s) */
1791 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1792 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1793 			if (err != DDI_SUCCESS) {
1794 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1795 
1796 				TNF_PROBE_0_DEBUG(
1797 				    hci1394_ixl_update_perform_exit,
1798 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1799 				return (DDI_FAILURE);
1800 			}
1801 
1802 			/*
1803 			 * this preserves interrupt enable bits, et al. in each
1804 			 * descriptor block header.
1805 			 */
1806 			desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1807 			desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
1808 			    uvp->hcihdr;
1809 			ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);
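
			/*
			 * Like the status update below, this is a masked
			 * read-modify-write; assuming reqCount occupies the
			 * low 16 bits of the header quadlet,
			 *
			 *	desc_hdr = (desc_hdr & ~0xffff) | hcihdr;
			 *
			 * so the interrupt/branch/wait control bits in the
			 * upper half of the quadlet survive the update.
			 */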
1810 
1811 			/*
1812 			 * change only low 1/2 word leaving status bits
1813 			 * unchanged
1814 			 */
1815 			desc_status = ddi_get32(acc_hdl, &hcidescp->status);
1816 			desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
1817 			    uvp->hcistatus;
1818 			ddi_put32(acc_hdl, &hcidescp->status, desc_status);
1819 
1820 			/* Sync descriptor for device (desc was modified) */
1821 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1822 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1823 			if (err != DDI_SUCCESS) {
1824 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1825 
1826 				TNF_PROBE_0_DEBUG(
1827 				    hci1394_ixl_update_perform_exit,
1828 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1829 				return (DDI_FAILURE);
1830 			}
1831 		}
1832 
1833 		/*
1834 		 * set old ixl recv buf sizes and buffers from
1835 		 * new ixl recv buf cmd
1836 		 */
1837 		old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
1838 		old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
1839 		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
1840 		    new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1841 		old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
1842 		break;
1843 	}
1844 	case IXL1394_OP_SEND_PKT_U:
1845 	case IXL1394_OP_SEND_PKT_ST_U:
1846 	case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
1847 		ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
1848 		ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
1849 
1850 		old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
1851 		new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
1852 
1853 		/*
1854 		 * replace pkthdr2 in output more immediate (the first) hci
1855 		 * descriptor in block, then alter buffer address and count in
1856 		 * IXL send pkt command related output more/last hci descriptor.
1857 		 */
1858 		hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
1859 		    uvp->hdr_offset;
1860 		acc_hdl	 = xferctlp->dma[0].dma_buf->bi_handle;
1861 		dma_hdl	 = xferctlp->dma[0].dma_buf->bi_dma_handle;
1862 
1863 		ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
1864 		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
1865 		ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1866 
1867 		/* Sync descriptor for device (desc was modified) */
1868 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1869 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1870 		if (err != DDI_SUCCESS) {
1871 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1872 
1873 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1874 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1875 			return (DDI_FAILURE);
1876 		}
1877 
1878 		/*
1879 		 * set old ixl send pkt size and buffers from
1880 		 * new ixl send pkt cmd
1881 		 */
1882 		old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
1883 		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
1884 		    new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
1885 		old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
1886 		break;
1887 	}
1888 	case IXL1394_OP_SEND_BUF_U: {
1889 		ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1890 		ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1891 		uint32_t	   desc_hdr;
1892 
1893 		old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1894 		new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1895 
1896 		/*
1897 		 * iterate through set of descriptor blocks for this IXL xfer
1898 		 * command replacing pkthdr2 in output more immediate
1899 		 * (the first) hci descriptor block descriptor, then altering
1900 		 * buffer address and count in each output last (the only other)
1901 		 * hci descriptor block descriptor.
1902 		 */
1903 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1904 			hcidescp = (hci1394_desc_imm_t *)
1905 			    xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
1906 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1907 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1908 
1909 			ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
1910 			ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1911 
1912 			/* advance to next buffer segment */
1913 			uvp->bufaddr += uvp->bufsize;
1914 
1915 			/* Sync the descriptor before we grab the header(s) */
1916 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1917 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1918 			if (err != DDI_SUCCESS) {
1919 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1920 
1921 				TNF_PROBE_0_DEBUG(
1922 				    hci1394_ixl_update_perform_exit,
1923 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1924 				return (DDI_FAILURE);
1925 			}
1926 
1927 			/*
1928 			 * this preserves interrupt enable bits, et al
1929 			 * in each desc block hdr
1930 			 */
1931 			desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1932 			desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
1933 			    uvp->hcihdr;
1934 			ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);
1935 
1936 			/* Sync descriptor for device (desc was modified) */
1937 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1938 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1939 			if (err != DDI_SUCCESS) {
1940 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1941 
1942 				TNF_PROBE_0_DEBUG(
1943 				    hci1394_ixl_update_perform_exit,
1944 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1945 				return (DDI_FAILURE);
1946 			}
1947 		}
1948 
1949 		/*
1950 		 * set old ixl send buf sizes and buffers from
1951 		 * new ixl send buf cmd
1952 		 */
1953 		old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
1954 		old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
1955 		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
1956 		    new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1957 		old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
1958 		break;
1959 	}
1960 	default:
1961 		/* ixl command being updated must be one of above, else error */
1962 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1963 
1964 		TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1965 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1966 		return (DDI_FAILURE);
1967 	}
1968 
1969 	/* hit the WAKE bit in the context control register */
1970 	if (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RECV) {
1971 		HCI1394_IRCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
1972 		    0, 0, 0, 0, 0, 1 /* wake */);
1973 	} else {
1974 		HCI1394_ITCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
1975 		    0, 0, 0, 1 /* wake */);
1976 	}
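
	/*
	 * Per OpenHCI, setting wake prompts a context that went to sleep on
	 * a Z = 0 branch to re-fetch its current descriptor, so a context
	 * stopped on the old branch words picks up the values just written;
	 * an actively running context simply ignores the bit.
	 */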
1977 
1978 	/* perform update completed successfully */
1979 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_perform_exit,
1980 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1981 	return (DDI_SUCCESS);
1982 }
1983 
1984 /*
1985  * hci1394_ixl_update_evaluate()
1986  *    Evaluate where the hardware is in running through the DMA descriptor
1987  *    blocks.
1988  */
1989 static int
1990 hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp)
1991 {
1992 	hci1394_iso_ctxt_t	*ctxtp;
1993 	ixl1394_command_t	*ixlp;
1994 	int			ixldepth;
1995 	int			ii;
1996 
1997 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_enter,
1998 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1999 
2000 	ctxtp = uvp->ctxtp;
2001 
2002 	ixlp = NULL;
2003 	ixldepth = 0xFFFFFFFF;
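
	/*
	 * NULL and 0xFFFFFFFF can never match a real IXL command pointer
	 * and depth, so the loop below is guaranteed to run at least once
	 * before the stability test can succeed.
	 */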
2004 
2005 	/*
2006 	 * repeat until IXL execution status evaluation function returns error
2007 	 * or until pointer to currently executing IXL command and its depth
2008 	 * stabilize
2009 	 */
2010 	while ((ixlp != ctxtp->ixl_execp) ||
2011 	    (ixldepth != ctxtp->ixl_exec_depth)) {
2012 
2013 		ixlp = ctxtp->ixl_execp;
2014 		ixldepth = ctxtp->ixl_exec_depth;
2015 
2016 		/*
2017 		 * call IXL execution status evaluation (ixl_dma_sync)
2018 		 * function returning if error (HCI1394_IXL_INTR_DMALOST is
2019 		 * only error condition).
2020 		 *
2021 		 * Note: interrupt processing function can only return one of
2022 		 * the following statuses here:
2023 		 *    HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP,
2024 		 *    HCI1394_IXL_INTR_DMALOST
2025 		 *
2026 		 * it can not return the following status here:
2027 		 *    HCI1394_IXL_INTR_NOADV
2028 		 *
2029 		 * Don't need to grab the lock here... for the same reason
2030 		 * explained in hci1394_ixl_update_endup() above.
2031 		 */
2032 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
2033 		if (hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp) ==
2034 		    HCI1394_IXL_INTR_DMALOST) {
2035 
2036 			/* return post-perform update failed status */
2037 			uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;
2038 
2039 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
2040 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2041 			return (DDI_FAILURE);
2042 		}
2043 	}
2044 
2045 	/*
2046 	 * if the currently executing IXL command is one of the IXL_MAX_LOCN
2047 	 * locations saved before update was performed, return update
2048 	 * successful status.
2049 	 */
2050 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
2051 		if ((uvp->locn_info[ii].ixlp == ixlp) &&
2052 		    (uvp->locn_info[ii].ixldepth == ixldepth)) {
2053 
2054 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
2055 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2056 			return (DDI_SUCCESS);
2057 		}
2058 	}
2059 
2060 	/*
2061 	 * else return post-perform update failed status.
2062 	 * note: later can make more sophisticated evaluations about where
2063 	 * execution processing went, and whether the update has really failed.
2064 	 */
2065 	uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;
2066 
2067 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_evaluate_exit,
2068 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2069 	return (DDI_FAILURE);
2070 }
2071 
2072 /*
2073  * hci1394_ixl_update_analysis()
2074  *    Determine if the hardware is within the range we expected it to be.
2075  *    If so the update succeeded.
2076  */
2077 static int
2078 hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp)
2079 {
2080 	hci1394_iso_ctxt_t	*ctxtp;
2081 	ixl1394_command_t	*ixlp;
2082 	int			ixldepth;
2083 	int			ii;
2084 	int			status;
2085 
2086 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_enter,
2087 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2088 
2089 	ctxtp = uvp->ctxtp;
2090 
2091 	ixlp = NULL;
2092 	ixldepth = 0xFFFFFFFF;
2093 
2094 	/*
2095 	 * repeat until ixl execution status evaluation function returns error
2096 	 * or until pointer to currently executing ixl command and its depth
2097 	 * stabilize.
2098 	 */
2099 	while ((ixlp != ctxtp->ixl_execp) ||
2100 	    (ixldepth != ctxtp->ixl_exec_depth)) {
2101 
2102 		ixlp = ctxtp->ixl_execp;
2103 		ixldepth = ctxtp->ixl_exec_depth;
2104 
2105 		/*
2106 		 * call ixl execution status evaluation (interrupt processing).
2107 		 * set IXL1394_EIDU_PRE_UPD_DMALOST if status INTR_DMALOST and
2108 		 * return.
2109 		 *
2110 		 * Note: interrupt processing function can only return one of
2111 		 * the following statuses here:
2112 		 *    HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP or
2113 		 *    HCI1394_IXL_INTR_DMALOST
2114 		 *
2115 		 * it can not return the following status here:
2116 		 *    HCI1394_IXL_INTR_NOADV
2117 		 *
2118 		 * Don't need to grab the lock here... for the same reason
2119 		 * explained in hci1394_ixl_update_endup() above.
2120 		 */
2121 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
2122 
2123 		status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
2124 		if (status == HCI1394_IXL_INTR_DMALOST) {
2125 			/*
2126 			 * set pre-update dma processing lost status and
2127 			 * return error
2128 			 */
2129 			uvp->upd_status = IXL1394_EPRE_UPD_DMALOST;
2130 
2131 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
2132 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2133 			return (DDI_FAILURE);
2134 		}
2135 	}
2136 
2137 	/*
2138 	 * save locations of currently executing ixl command and the
2139 	 * 3 following it.
2140 	 */
2141 	hci1394_ixl_update_set_locn_info(uvp);
2142 
2143 	/*
2144 	 * if xfer_ixl_cmd associated with the IXL_command being updated is one
2145 	 * of the saved (currently executing) IXL commands, risk is too great to
2146 	 * perform update now, set IXL1394_ERISK_PROHIBITS_UPD status and
2147 	 * return error.
2148 	 *
2149 	 * Note: later can implement more sophisticated risk override
2150 	 * evaluations and processing.
2151 	 */
2152 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
2153 
2154 		if ((uvp->locn_info[ii].ixlp == uvp->ixlxferp) &&
2155 		    (uvp->locn_info[ii].ixldepth >= uvp->ixldepth) &&
2156 		    (uvp->locn_info[ii].ixldepth <
2157 			(uvp->ixldepth + uvp->ixlcount))) {
2158 
2159 			uvp->upd_status = IXL1394_ERISK_PROHIBITS_UPD;
2160 
2161 			TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
2162 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2163 			return (DDI_FAILURE);
2164 		}
2165 	}
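
	/*
	 * A worked example of the window test above (values assumed):
	 * updating at ixldepth 2 with ixlcount 3 covers depths 2, 3 and 4
	 * of ixlxferp; if any saved execution location falls on one of
	 * those depths, the hardware may be fetching the very descriptor
	 * blocks about to change, so the update is refused.
	 */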
2166 
2167 	/* it is safe for the update to be performed, return ok status */
2168 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_analysis_exit,
2169 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2170 	return (DDI_SUCCESS);
2171 }
2172 
2173 /*
2174  * hci1394_ixl_update_set_locn_info()
2175  *    set up the local list of the IXL_MAX_LOCN next commandPtr locations we
2176  *    expect the hardware to get to in the next 125 microseconds.
2177  */
2178 static void
2179 hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp)
2180 {
2181 	hci1394_iso_ctxt_t	*ctxtp;
2182 	ixl1394_command_t	*ixlp;
2183 	int			ixldepth;
2184 	int			ii;
2185 
2186 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_set_locn_info_enter,
2187 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2188 
2189 	/*
2190 	 * find next xfer start ixl command, starting with current ixl command
2191 	 * where execution last left off
2192 	 */
2193 	ctxtp = uvp->ctxtp;
2194 
2195 	ixldepth = ctxtp->ixl_exec_depth;
2196 	(void) hci1394_ixl_find_next_exec_xfer(ctxtp->ixl_execp, NULL, &ixlp);
2197 
2198 	/*
2199 	 * if the current IXL command wasn't a xfer start command, then reset
2200 	 * the depth to 0 for the xfer command found
2201 	 */
2202 	if (ixlp != ctxtp->ixl_execp)
2203 		ixldepth = 0;
2204 
2205 	/*
2206 	 * save xfer start IXL command & its depth and also save location and
2207 	 * depth of the next IXL_MAX_LOCN-1 xfer start IXL commands following
2208 	 * it (if any)
2209 	 */
2210 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
2211 		uvp->locn_info[ii].ixlp = ixlp;
2212 		uvp->locn_info[ii].ixldepth = ixldepth;
2213 
2214 		if (ixlp) {
2215 			/*
2216 			 * if more dma commands generated by this xfer command
2217 			 * still follow, use them. else, find the next xfer
2218 			 * start IXL command and set its depth to 0.
2219 			 */
2220 			if (++ixldepth >= ((hci1394_xfer_ctl_t *)
2221 			    ixlp->compiler_privatep)->cnt) {
2222 
2223 				(void) hci1394_ixl_find_next_exec_xfer(
2224 				    ixlp->next_ixlp, NULL, &ixlp);
2225 				ixldepth = 0;
2226 			}
2227 		}
2228 	}
2229 	TNF_PROBE_0_DEBUG(hci1394_ixl_update_set_locn_info_exit,
2230 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2231 }
2232