xref: /illumos-gate/usr/src/uts/common/io/1394/adapters/hci1394_ixl_update.c (revision 2570281cf351044b6936651ce26dbe1f801dcbd8)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * hci1394_ixl_update.c
28  *    Isochronous IXL update routines.
29  *    Routines used to dynamically update a compiled and presumably running
30  *    IXL program.
31  */
32 
33 #include <sys/kmem.h>
34 #include <sys/types.h>
35 #include <sys/conf.h>
36 #include <sys/disp.h>
37 #include <sys/1394/h1394.h>
38 #include <sys/1394/ixl1394.h>	/* IXL opcodes & data structs */
39 
40 #include <sys/1394/adapters/hci1394.h>
41 
42 
43 /* local defines for hci1394_ixl_update_prepare return codes */
44 #define	IXL_PREP_READY	    1
45 #define	IXL_PREP_SUCCESS    0
46 #define	IXL_PREP_FAILURE    (-1)
47 
48 /*
49  * number of times update will retry waiting for the interrupt
50  * routine to complete before giving up.
51  */
52 int hci1394_upd_retries_before_fail = 50;
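/*
 * With the drv_usecwait(1) delay between attempts (see
 * hci1394_ixl_update_enable()), this bounds the busy-wait at roughly
 * hci1394_upd_retries_before_fail microseconds plus mutex hold times.
 * As a hypothetical tuning example (not from the original source), the
 * value could be raised at boot time via /etc/system:
 *
 *	set hci1394:hci1394_upd_retries_before_fail = 100
 */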
53 
54 /* IXL runtime update static functions */
55 static int hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp);
56 static int hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp);
57 static int hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp);
58 static int hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp);
59 static int hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp);
60 static int hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp);
61 static int hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp);
62 static int hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp);
63 static int hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp);
64 static int hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp);
65 static int hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp);
66 static void hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp);
67 static int hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp);
68 static int hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp);
69 
70 /*
71  *	IXL commands and included fields which can be updated
72  * IXL1394_OP_CALLBACK:		callback(), callback_data
73  * IXL1394_OP_JUMP:		label
74  * IXL1394_OP_RECV_PKT		ixl_buf, size, mem_bufp
75  * IXL1394_OP_RECV_PKT_ST	ixl_buf, size, mem_bufp
76  * IXL1394_OP_RECV_BUF(ppb)	ixl_buf, size, pkt_size, mem_bufp, buf_offset
77  * IXL1394_OP_RECV_BUF(fill)	ixl_buf, size, pkt_size, mem_bufp, buf_offset
78  * IXL1394_OP_SEND_PKT		ixl_buf, size, mem_bufp
79  * IXL1394_OP_SEND_PKT_ST	ixl_buf, size, mem_bufp
80  * IXL1394_OP_SEND_PKT_WHDR_ST	ixl_buf, size, mem_bufp
81  * IXL1394_OP_SEND_BUF		ixl_buf, size, pkt_size, mem_bufp, buf_offset
82  * IXL1394_OP_SET_TAGSYNC	tag, sync
83  * IXL1394_OP_SET_SKIPMODE	skipmode, label
84  *
85  *	IXL commands which can not be updated
86  * IXL1394_OP_LABEL
87  * IXL1394_OP_SEND_HDR_ONLY
88  * IXL1394_OP_SEND_NO_PKT
89  * IXL1394_OP_STORE_VALUE
90  * IXL1394_OP_STORE_TIMESTAMP
91  * IXL1394_OP_SET_SYNCWAIT
92  */
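/*
 * Illustrative sketch (not part of the driver) of how an update request
 * is formed: the caller copies the compiled IXL command, changes only
 * the updatable fields, and hands both old and new commands to
 * hci1394_ixl_update().  The names my_new_callback and my_arg are
 * hypothetical:
 *
 *	ixl1394_callback_t new_cmd;
 *	int result;
 *
 *	new_cmd = *old_cmd;		(copy keeps ixl_opcode matched)
 *	new_cmd.callback = my_new_callback;
 *	new_cmd.callback_arg = my_arg;
 *	if (hci1394_ixl_update(soft_statep, ctxtp,
 *	    (ixl1394_command_t *)&new_cmd, (ixl1394_command_t *)old_cmd,
 *	    0, &result) != DDI_SUCCESS) {
 *		(result holds an ixl1394 status, e.g.
 *		IXL1394_EUPDATE_DISALLOWED)
 *	}
 */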
93 
94 /*
95  * hci1394_ixl_update
96  *    main entrypoint into dynamic update code: initializes temporary
97  *    update variables, evaluates request, coordinates with potentially
98  *    simultaneous run of the interrupt stack, evaluates likelihood of success,
99  *    performs the update, checks if completed, performs cleanup
100  *    resulting from coordination with interrupt stack.
101  */
102 int
103 hci1394_ixl_update(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
104     ixl1394_command_t *ixlnewp, ixl1394_command_t *ixloldp,
105     uint_t riskoverride, int *resultp)
106 {
107 	hci1394_ixl_update_vars_t uv;	/* update work variables structure */
108 	int prepstatus;
109 	int ret;
110 
111 	/* save caller specified values in update work variables structure */
112 	uv.soft_statep = soft_statep;
113 	uv.ctxtp = ctxtp;
114 	uv.ixlnewp = ixlnewp;
115 	uv.ixloldp = ixloldp;
116 	uv.risklevel = riskoverride;
117 
118 	/* initialize remainder of update work variables */
119 	uv.ixlxferp = NULL;
120 	uv.skipxferp = NULL;
121 	uv.skipmode = 0;
122 	uv.skipaddr = 0;
123 	uv.jumpaddr = 0;
124 	uv.pkthdr1 = 0;
125 	uv.pkthdr2 = 0;
126 	uv.bufaddr = 0;
127 	uv.bufsize = 0;
128 	uv.ixl_opcode = uv.ixlnewp->ixl_opcode;
129 	uv.hcihdr = 0;
130 	uv.hcistatus = 0;
131 	uv.hci_offset = 0;
132 	uv.hdr_offset = 0;
133 
134 	/* set done ok return status */
135 	uv.upd_status = 0;
136 
137 	/* evaluate request and prepare to perform update */
138 	prepstatus = hci1394_ixl_update_prepare(&uv);
139 	if (prepstatus != IXL_PREP_READY) {
140 		/*
141 		 * whether done, nothing to do, or an evaluation error,
142 		 * return the update status
143 		 */
144 		*resultp = uv.upd_status;
145 
146 		/* if prep evaluation error, return failure */
147 		if (prepstatus != IXL_PREP_SUCCESS) {
148 			return (DDI_FAILURE);
149 		}
150 		/* if no action or update done, return update successful */
151 		return (DDI_SUCCESS);
152 	}
153 
154 	/* reserve the interrupt context for update processing */
155 	ret = hci1394_ixl_update_enable(&uv);
156 	if (ret != DDI_SUCCESS) {
157 
158 		/* error acquiring control of context - return */
159 		*resultp = uv.upd_status;
160 
161 		return (DDI_FAILURE);
162 	}
163 
164 	/* perform update risk analysis */
165 	if (hci1394_ixl_update_analysis(&uv) != DDI_SUCCESS) {
166 		/*
167 		 * return, if excessive risk or dma execution processing lost
168 		 * (note: caller risk override not yet implemented)
169 		 */
170 
171 		/* attempt intr processing cleanup, unless err is dmalost */
172 		if (uv.upd_status != IXL1394_EPRE_UPD_DMALOST) {
173 			(void) hci1394_ixl_update_endup(&uv);
174 		} else {
175 			/*
176 			 * error is dmalost, just release interrupt context.
177 			 * take the lock here to ensure an atomic read, modify,
178 			 * write of the "intr_flags" field while we try to
179 			 * clear the "in update" flag.  protects from the
180 			 * interrupt routine.
181 			 */
182 			mutex_enter(&ctxtp->intrprocmutex);
183 			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
184 			mutex_exit(&ctxtp->intrprocmutex);
185 		}
186 		*resultp = uv.upd_status;
187 
188 		return (DDI_FAILURE);
189 	}
190 
191 
192 	/* perform requested update */
193 	if (hci1394_ixl_update_perform(&uv) != DDI_SUCCESS) {
194 		/*
195 		 * if non-completion condition, return update status
196 		 * attempt interrupt processing cleanup first
197 		 */
198 		(void) hci1394_ixl_update_endup(&uv);
199 
200 		*resultp = uv.upd_status;
201 
202 		return (DDI_FAILURE);
203 	}
204 
205 	/* evaluate update completion, setting completion status */
206 	if (hci1394_ixl_update_evaluate(&uv) != DDI_SUCCESS) {
207 		/*
208 		 * update failed - bad, just release interrupt context
209 		 * take the lock here too (jsut like above) to ensure an
210 		 * atomic read, modify, write of the "intr_flags" field
211 		 * while we try to clear the "in update" flag.  protects
212 		 * from the interrupt routine.
213 		 */
214 		mutex_enter(&ctxtp->intrprocmutex);
215 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
216 		mutex_exit(&ctxtp->intrprocmutex);
217 
218 		/* if DMA stopped or lost, formally stop context */
219 		if (uv.upd_status == HCI1394_IXL_INTR_DMASTOP) {
220 			hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
221 			    ID1394_DONE);
222 		} else if (uv.upd_status == HCI1394_IXL_INTR_DMALOST) {
223 			hci1394_do_stop(soft_statep, ctxtp, B_TRUE,
224 			    ID1394_FAIL);
225 		}
226 
227 		*resultp = uv.upd_status;
228 
229 		return (DDI_FAILURE);
230 	}
231 
232 	/* perform interrupt processing cleanup */
233 	uv.upd_status = hci1394_ixl_update_endup(&uv);
234 
235 	/* return update completion status */
236 	*resultp = uv.upd_status;
237 
238 	return (DDI_SUCCESS);
239 }
240 
241 /*
242  * hci1394_ixl_update_enable
243  *	Used to coordinate dynamic update activities with simultaneous
244  *	interrupt handler processing, while holding the context mutex
245  *      for as short a time as possible.
246  */
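/*
 * Rough sketch of the intr_flags handshake used below and in
 * hci1394_ixl_update_endup(); the flag names are real, the transitions
 * are paraphrased from the code:
 *
 *	HCI1394_ISO_CTXT_INUPDATE set:	an update owns the context; a
 *					second update fails outright.
 *	HCI1394_ISO_CTXT_ININTR set:	the interrupt routine owns the
 *					context; wait 1us and retry, up to
 *					hci1394_upd_retries_before_fail
 *					times (never retried when already
 *					on the interrupt stack).
 *	HCI1394_ISO_CTXT_INTRSET set:	the interrupt routine wanted a turn
 *					while INUPDATE was held; endup runs
 *					hci1394_ixl_dma_sync() on its
 *					behalf before clearing INUPDATE.
 */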
247 static int
248 hci1394_ixl_update_enable(hci1394_ixl_update_vars_t *uvp)
249 {
250 	int	status;
251 	boolean_t retry;
252 	uint_t	remretries;
253 
254 	retry = B_TRUE;
255 	/* set arbitrary number of retries before giving up */
256 	remretries = hci1394_upd_retries_before_fail;
257 	status = DDI_SUCCESS;
258 
259 	/*
260 	 * if we waited for an interrupt-processing-generated callback to
261 	 * complete, retry here
262 	 */
263 	ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
264 	mutex_enter(&uvp->ctxtp->intrprocmutex);
265 
266 	while (retry == B_TRUE) {
267 		retry = B_FALSE;
268 		remretries--;
269 
270 		/* failure if update processing is already in progress */
271 		if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
272 			uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
273 			status = DDI_FAILURE;
274 		} else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_ININTR) {
275 			/*
276 			 * if we have retried the maximum number of times, or if
277 			 * this update request is on the interrupt stack (meaning
278 			 * the target driver's callback function initiated the
279 			 * update), set update failure.
280 			 */
281 			if ((remretries <= 0) ||
282 			    (servicing_interrupt())) {
283 				uvp->upd_status = IXL1394_EUPDATE_DISALLOWED;
284 				status = DDI_FAILURE;
285 			} else {
286 				/*
287 				 * if not on interrupt stack and retries not
288 				 * exhausted, free mutex, wait a short time
289 				 * and then retry.
290 				 */
291 				retry = B_TRUE;
292 				mutex_exit(&uvp->ctxtp->intrprocmutex);
293 				drv_usecwait(1);
294 				mutex_enter(&uvp->ctxtp->intrprocmutex);
295 				continue;
296 			}
297 		} else if (uvp->ctxtp->intr_flags & HCI1394_ISO_CTXT_INCALL) {
298 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
299 			status = DDI_FAILURE;
300 		}
301 	}
302 
303 	/* if context is available, reserve it for this update request */
304 	if (status == DDI_SUCCESS) {
305 		uvp->ctxtp->intr_flags |= HCI1394_ISO_CTXT_INUPDATE;
306 	}
307 
308 	ASSERT(MUTEX_HELD(&uvp->ctxtp->intrprocmutex));
309 	mutex_exit(&uvp->ctxtp->intrprocmutex);
310 
311 	return (status);
312 }
313 
314 /*
315  * hci1394_ixl_update_endup()
316  *    The ending stage of coordinating with simultaneously running interrupts.
317  *    Perform interrupt processing sync tasks if we (update) had blocked the
318  *    interrupt out when it wanted a turn.
319  */
320 static int
321 hci1394_ixl_update_endup(hci1394_ixl_update_vars_t *uvp)
322 {
323 	uint_t status;
324 	hci1394_iso_ctxt_t *ctxtp;
325 
326 	status = HCI1394_IXL_INTR_NOERROR;
327 	ctxtp = uvp->ctxtp;
328 
329 	while (ctxtp->intr_flags & HCI1394_ISO_CTXT_INUPDATE) {
330 
331 		if (ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET) {
332 			/*
333 			 * We don't need to grab the lock here because
334 			 * the "intr_flags" field is only modified in two
335 			 * ways - one in UPDATE and one in INTR routine. Since
336 			 * we know that it can't be modified simultaneously
337 			 * in another UPDATE thread - that is assured by the
338 			 * checks in "update_enable" - we would only be trying
339 			 * to protect against the INTR thread.  And since we
340 			 * are going to clear a bit here (and check it again
341 			 * at the top of the loop) we are not really concerned
342 			 * about missing its being set by the INTR routine.
343 			 */
344 			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
345 
346 			status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
347 			if (status ==  HCI1394_IXL_INTR_DMALOST) {
348 				/*
349 				 * Unlike above, we do care here as we are
350 				 * trying to clear the "in update" flag, and
351 				 * we don't want that lost because the INTR
352 				 * routine is trying to set its flag.
353 				 */
354 				mutex_enter(&uvp->ctxtp->intrprocmutex);
355 				ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
356 				mutex_exit(&uvp->ctxtp->intrprocmutex);
357 				continue;
358 			}
359 		}
360 
361 		ASSERT(MUTEX_NOT_HELD(&uvp->ctxtp->intrprocmutex));
362 		mutex_enter(&uvp->ctxtp->intrprocmutex);
363 		if (!(ctxtp->intr_flags & HCI1394_ISO_CTXT_INTRSET)) {
364 			ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INUPDATE;
365 		}
366 		mutex_exit(&uvp->ctxtp->intrprocmutex);
367 	}
368 
369 	/* if DMA stopped or lost, formally stop context */
370 	if (status == HCI1394_IXL_INTR_DMASTOP) {
371 		hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_DONE);
372 	} else if (status == HCI1394_IXL_INTR_DMALOST) {
373 		hci1394_do_stop(uvp->soft_statep, ctxtp, B_TRUE, ID1394_FAIL);
374 	}
375 
376 	return (status);
377 }
378 
379 /*
380  * hci1394_ixl_update_prepare()
381  *    Preparation for the actual update (using temp uvp struct)
382  */
383 static int
384 hci1394_ixl_update_prepare(hci1394_ixl_update_vars_t *uvp)
385 {
386 	int		    ret;
387 
388 	/* new and old ixl commands must have the same opcode */
389 	if (uvp->ixlnewp->ixl_opcode != uvp->ixloldp->ixl_opcode) {
390 
391 		uvp->upd_status = IXL1394_EOPCODE_MISMATCH;
392 
393 		return (IXL_PREP_FAILURE);
394 	}
395 
396 	/*
397 	 * perform evaluation and prepare update based on specific
398 	 * IXL command type
399 	 */
400 	switch (uvp->ixl_opcode) {
401 
402 	case IXL1394_OP_CALLBACK_U: {
403 		ixl1394_callback_t *old_callback_ixlp;
404 		ixl1394_callback_t *new_callback_ixlp;
405 
406 		old_callback_ixlp = (ixl1394_callback_t *)uvp->ixloldp;
407 		new_callback_ixlp = (ixl1394_callback_t *)uvp->ixlnewp;
408 
409 		/* perform update now without further evaluation */
410 		old_callback_ixlp->callback_arg =
411 		    new_callback_ixlp->callback_arg;
412 		old_callback_ixlp->callback = new_callback_ixlp->callback;
413 
414 		/* nothing else to do, return with done ok status */
415 		return (IXL_PREP_SUCCESS);
416 	}
417 
418 	case IXL1394_OP_JUMP_U:
419 		ret = hci1394_ixl_update_prep_jump(uvp);
420 
421 		return (ret);
422 
423 	case IXL1394_OP_SET_SKIPMODE_U:
424 		ret = hci1394_ixl_update_prep_set_skipmode(uvp);
425 
426 		return (ret);
427 
428 	case IXL1394_OP_SET_TAGSYNC_U:
429 		ret = hci1394_ixl_update_prep_set_tagsync(uvp);
430 
431 		return (ret);
432 
433 	case IXL1394_OP_RECV_PKT_U:
434 	case IXL1394_OP_RECV_PKT_ST_U:
435 		ret = hci1394_ixl_update_prep_recv_pkt(uvp);
436 
437 		return (ret);
438 
439 	case IXL1394_OP_RECV_BUF_U:
440 		ret = hci1394_ixl_update_prep_recv_buf(uvp);
441 
442 		return (ret);
443 
444 	case IXL1394_OP_SEND_PKT_U:
445 	case IXL1394_OP_SEND_PKT_ST_U:
446 	case IXL1394_OP_SEND_PKT_WHDR_ST_U:
447 		ret = hci1394_ixl_update_prep_send_pkt(uvp);
448 
449 		return (ret);
450 
451 	case IXL1394_OP_SEND_BUF_U:
452 		ret = hci1394_ixl_update_prep_send_buf(uvp);
453 
454 		return (ret);
455 
456 	default:
457 		/* ixl command being updated must be one of above, else error */
458 		uvp->upd_status = IXL1394_EOPCODE_DISALLOWED;
459 
460 		return (IXL_PREP_FAILURE);
461 	}
462 }
463 
464 /*
465  * hci1394_ixl_update_prep_jump()
466  *    Preparation for update of an IXL1394_OP_JUMP_U command.
467  */
468 static int
469 hci1394_ixl_update_prep_jump(hci1394_ixl_update_vars_t *uvp)
470 {
471 	ixl1394_jump_t	    *old_jump_ixlp;
472 	ixl1394_jump_t	    *new_jump_ixlp;
473 	ixl1394_command_t   *ixlp;
474 	hci1394_xfer_ctl_t  *xferctlp;
475 	hci1394_desc_t	    *hcidescp;
476 	uint_t		    cbcnt;
477 	ddi_acc_handle_t    acc_hdl;
478 	ddi_dma_handle_t    dma_hdl;
479 	uint32_t	    desc_hdr;
480 	int		    err;
481 
482 	old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
483 	new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;
484 
485 	/* check if any change between new and old ixl jump command */
486 	if (new_jump_ixlp->label == old_jump_ixlp->label) {
487 
488 		/* if none, return with done ok status */
489 		return (IXL_PREP_SUCCESS);
490 	}
491 
492 	/* new ixl jump command label must be ptr to valid ixl label or NULL */
493 	if ((new_jump_ixlp->label != NULL) &&
494 	    (new_jump_ixlp->label->ixl_opcode != IXL1394_OP_LABEL)) {
495 
496 		/* if not jumping to label, return an error */
497 		uvp->upd_status = IXL1394_EJUMP_NOT_TO_LABEL;
498 
499 		return (IXL_PREP_FAILURE);
500 	}
501 
502 	/*
503 	 * follow exec path from new ixl jump command label to determine new
504 	 * jump destination ixl xfer command
505 	 */
506 	(void) hci1394_ixl_find_next_exec_xfer(new_jump_ixlp->label, &cbcnt,
507 	    &ixlp);
508 	if (ixlp != NULL) {
509 		/*
510 		 * get the bound address of the first descriptor block reached
511 		 * by the jump destination.  (This descriptor is the first
512 		 * transfer command following the jumped-to label.)  Set the
513 		 * descriptor's address (with Z bits) into jumpaddr.
514 		 */
515 		uvp->jumpaddr = ((hci1394_xfer_ctl_t *)
516 		    ixlp->compiler_privatep)->dma[0].dma_bound;
517 	}
518 
519 	/*
520 	 * get associated xfer IXL command from compiler_privatep of old
521 	 * jump command
522 	 */
523 	if ((uvp->ixlxferp = (ixl1394_command_t *)
524 	    old_jump_ixlp->compiler_privatep) == NULL) {
525 
526 		/* if none, return an error */
527 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
528 
529 		return (IXL_PREP_FAILURE);
530 	}
531 
532 	/*
533 	 * get the associated IXL xfer command's last dma descriptor block
534 	 * last descriptor, then get hcihdr from its hdr field,
535 	 * removing interrupt enabled bits
536 	 */
537 	xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep;
538 	hcidescp = (hci1394_desc_t *)xferctlp->dma[xferctlp->cnt - 1].dma_descp;
539 	acc_hdl  = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
540 	dma_hdl  = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;
541 
542 	/* Sync the descriptor before we grab the header(s) */
543 	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
544 	    DDI_DMA_SYNC_FORCPU);
545 	if (err != DDI_SUCCESS) {
546 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
547 
548 		return (IXL_PREP_FAILURE);
549 	}
550 
551 	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
552 	uvp->hcihdr = desc_hdr & ~DESC_INTR_ENBL;
553 
554 	/* set depth to last dma descriptor block & update count to 1 */
555 	uvp->ixldepth = xferctlp->cnt - 1;
556 	uvp->ixlcount = 1;
557 
558 	/*
559 	 * if there is only one dma descriptor block and IXL xfer command
560 	 * inited by a label or have found callbacks along the exec path to the
561 	 * new destination IXL xfer command, enable interrupt in hcihdr value
562 	 */
563 	if (((xferctlp->cnt == 1) &&
564 	    ((xferctlp->ctl_flags & XCTL_LABELLED) != 0)) || (cbcnt != 0)) {
565 
566 		uvp->hcihdr |= DESC_INTR_ENBL;
567 	}
568 
569 	/* If either old or new destination was/is NULL, enable interrupt */
570 	if ((new_jump_ixlp->label == NULL) || (old_jump_ixlp->label == NULL)) {
571 		uvp->hcihdr |= DESC_INTR_ENBL;
572 	}
573 
574 	/*
575 	 * if xfer type is xmit and the skip mode for this xfer command is
576 	 * IXL1394_SKIP_TO_NEXT, then set uvp->skipmode to IXL1394_SKIP_TO_NEXT,
577 	 * set uvp->skipaddr to uvp->jumpaddr, and set uvp->hci_offset to the
578 	 * offset from last dma descriptor to first dma descriptor
579 	 * (where skipaddr goes).
580 	 *
581 	 * update perform processing will have to set skip branch address to
582 	 * same location as jump destination in this case.
583 	 */
584 	uvp->skipmode = IXL1394_SKIP_TO_STOP;
585 	if ((uvp->ixlxferp->ixl_opcode & IXL1394_OPF_ONXMIT) != 0) {
586 
587 		if ((xferctlp->skipmodep && (((ixl1394_set_skipmode_t *)
588 		    xferctlp->skipmodep)->skipmode == IXL1394_SKIP_TO_NEXT)) ||
589 		    (uvp->ctxtp->default_skipmode == IXL1394_OPF_ONXMIT)) {
590 
591 			uvp->skipmode = IXL1394_SKIP_TO_NEXT;
592 			uvp->skipaddr = uvp->jumpaddr;
593 
594 			/*
595 			 * calc hci_offset to first descriptor (where skipaddr
596 			 * goes) of dma descriptor block from current (last)
597 			 * descriptor of the descriptor block (accessed in
598 			 * xfer_ctl dma_descp of IXL xfer command)
599 			 */
600 			if (uvp->ixlxferp->ixl_opcode ==
601 			    IXL1394_OP_SEND_HDR_ONLY) {
602 				/*
603 				 * send header only is (Z bits - 2)
604 				 * descriptor components back from last one
605 				 */
606 				uvp->hci_offset -= 2;
607 			} else {
608 				/*
609 				 * all others are (Z bits - 1) descriptor
610 				 * components back from last component
611 				 */
612 				uvp->hci_offset -= 1;
613 			}
614 		}
615 	}
616 	return (IXL_PREP_READY);
617 }
618 
619 /*
620  * hci1394_ixl_update_prep_set_skipmode()
621  *    Preparation for update of an IXL1394_OP_SET_SKIPMODE_U command.
622  */
623 static int
624 hci1394_ixl_update_prep_set_skipmode(hci1394_ixl_update_vars_t *uvp)
625 {
626 	ixl1394_set_skipmode_t	*old_set_skipmode_ixlp;
627 	ixl1394_set_skipmode_t	*new_set_skipmode_ixlp;
628 	ixl1394_command_t	*ixlp;
629 	hci1394_xfer_ctl_t	*xferctlp;
630 
631 	old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
632 	new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;
633 
634 	/* check if new set skipmode is a change from old set skipmode */
635 	if (new_set_skipmode_ixlp->skipmode ==
636 	    old_set_skipmode_ixlp->skipmode) {
637 
638 		if ((new_set_skipmode_ixlp->skipmode !=
639 		    IXL1394_SKIP_TO_LABEL) ||
640 		    (old_set_skipmode_ixlp->label ==
641 		    new_set_skipmode_ixlp->label)) {
642 			/* No change, return with done ok status */
643 			return (IXL_PREP_SUCCESS);
644 		}
645 	}
646 
647 	/* find associated ixl xfer command by following old ixl links */
648 	uvp->ixlxferp = uvp->ixloldp->next_ixlp;
649 	while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
650 	    IXL1394_OPF_ISXFER) == 0) ||
651 	    ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) !=	0))) {
652 
653 		uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
654 	}
655 
656 	/* return an error if no ixl xfer command found */
657 	if (uvp->ixlxferp == NULL) {
658 
659 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
660 
661 		return (IXL_PREP_FAILURE);
662 	}
663 
664 	/*
665 	 * get Z bits (number of descriptor components in descriptor block)
666 	 * from a dma bound addr in the xfer_ctl struct of the IXL xfer command
667 	 */
668 	if ((xferctlp = (hci1394_xfer_ctl_t *)
669 	    uvp->ixlxferp->compiler_privatep) == NULL) {
670 
671 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
672 
673 		return (IXL_PREP_FAILURE);
674 	}
675 	uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
676 
677 	/*
678 	 * determine hci_offset to first component (where skipaddr goes) of
679 	 * dma descriptor block from current (last) descriptor component of
680 	 * descriptor block (accessed in xfer_ctl dma_descp of IXL xfer command)
681 	 */
682 	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
683 		/*
684 		 * "send header only" is (Z bits - 2) descriptors back
685 		 * from last one
686 		 */
687 		uvp->hci_offset -= 2;
688 	} else {
689 		/*
690 		 * all others are (Z bits - 1) descriptors back from
691 		 * last descriptor.
692 		 */
693 		uvp->hci_offset -= 1;
694 	}
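	/*
	 * Illustrative example of the Z-bit arithmetic above (hypothetical
	 * values): a descriptor block whose dma_bound address carries
	 * Z = 3 in its low bits has three descriptor components, and
	 * dma_descp points at the last one.  The skip address lives in the
	 * first component, so hci_offset = 3 - 1 = 2 components back
	 * (3 - 2 = 1 for IXL1394_OP_SEND_HDR_ONLY).
	 */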
695 
696 	/* set depth to zero and count to update all dma descriptors */
697 	uvp->ixldepth = 0;
698 	uvp->ixlcount = xferctlp->cnt;
699 
700 	/* set new skipmode and validate */
701 	uvp->skipmode = new_set_skipmode_ixlp->skipmode;
702 
703 	if ((uvp->skipmode != IXL1394_SKIP_TO_NEXT) &&
704 	    (uvp->skipmode != IXL1394_SKIP_TO_SELF) &&
705 	    (uvp->skipmode != IXL1394_SKIP_TO_STOP) &&
706 	    (uvp->skipmode != IXL1394_SKIP_TO_LABEL)) {
707 
708 		/* return an error if invalid mode */
709 		uvp->upd_status = IXL1394_EBAD_SKIPMODE;
710 
711 		return (IXL_PREP_FAILURE);
712 	}
713 
714 	/* if mode is skip to label */
715 	if (uvp->skipmode == IXL1394_SKIP_TO_LABEL) {
716 
717 		/* verify label field is valid ixl label cmd */
718 		if ((new_set_skipmode_ixlp->label == NULL) ||
719 		    (new_set_skipmode_ixlp->label->ixl_opcode !=
720 		    IXL1394_OP_LABEL)) {
721 
722 			/* Error - not skipping to valid label */
723 			uvp->upd_status = IXL1394_EBAD_SKIP_LABEL;
724 
725 			return (IXL_PREP_FAILURE);
726 		}
727 
728 		/*
729 		 * follow new skip exec path after label to next xfer
730 		 * IXL command
731 		 */
732 		(void) hci1394_ixl_find_next_exec_xfer(
733 		    new_set_skipmode_ixlp->label, NULL, &ixlp);
734 
735 		/*
736 		 * set the skip destination IXL xfer command; after the update
737 		 * it is stored into the old set skip mode IXL compiler_privatep
738 		 */
739 		if ((uvp->skipxferp = ixlp) != NULL) {
740 			/*
741 			 * set skipaddr to be the first dma descriptor block's
742 			 * dma bound address w/Z bits
743 			 */
744 			xferctlp = (hci1394_xfer_ctl_t *)
745 			    ixlp->compiler_privatep;
746 			uvp->skipaddr = xferctlp->dma[0].dma_bound;
747 		}
748 	}
749 
750 	/*
751 	 * if mode is skip to next, get skipaddr for last dma descriptor block
752 	 */
753 	if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
754 		/* follow normal exec path to next xfer ixl command */
755 		(void) hci1394_ixl_find_next_exec_xfer(uvp->ixlxferp->next_ixlp,
756 		    NULL, &ixlp);
757 
758 		/*
759 		 * get skip_next destination IXL xfer command
760 		 * (for last iteration)
761 		 */
762 		if (ixlp != NULL) {
763 			/*
764 			 * set skipaddr to first dma descriptor block's
765 			 * dma bound address w/Z bits
766 			 */
767 			xferctlp = (hci1394_xfer_ctl_t *)
768 			    ixlp->compiler_privatep;
769 			uvp->skipaddr = xferctlp->dma[0].dma_bound;
770 		}
771 	}
772 	return (IXL_PREP_READY);
773 }
774 
775 /*
776  * hci1394_ixl_update_prep_set_tagsync()
777  *    Preparation for update of an IXL1394_OP_SET_TAGSYNC_U command.
778  */
779 static int
780 hci1394_ixl_update_prep_set_tagsync(hci1394_ixl_update_vars_t *uvp)
781 {
782 	ixl1394_set_tagsync_t	*old_set_tagsync_ixlp;
783 	ixl1394_set_tagsync_t	*new_set_tagsync_ixlp;
784 	hci1394_xfer_ctl_t	*xferctlp;
785 
786 	old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
787 	new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;
788 
789 	/* check if new set tagsync is a change from old set tagsync. */
790 	if ((new_set_tagsync_ixlp->tag == old_set_tagsync_ixlp->tag) &&
791 	    (new_set_tagsync_ixlp->sync == old_set_tagsync_ixlp->sync)) {
792 
793 		/* no change, return with done ok status */
794 		return (IXL_PREP_SUCCESS);
795 	}
796 
797 	/* find associated IXL xfer command by following old ixl links */
798 	uvp->ixlxferp = uvp->ixloldp->next_ixlp;
799 	while ((uvp->ixlxferp != NULL) && (((uvp->ixlxferp->ixl_opcode &
800 	    IXL1394_OPF_ISXFER) == 0) ||
801 	    ((uvp->ixlxferp->ixl_opcode & IXL1394_OPTY_MASK) != 0))) {
802 
803 		uvp->ixlxferp = uvp->ixlxferp->next_ixlp;
804 	}
805 
806 	/* return an error if no IXL xfer command found */
807 	if (uvp->ixlxferp == NULL) {
808 
809 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
810 
811 		return (IXL_PREP_FAILURE);
812 	}
813 
814 	/* is IXL xfer command an IXL1394_OP_SEND_NO_PKT? */
815 	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_NO_PKT) {
816 		/* no update needed, return done ok status */
817 		return (IXL_PREP_SUCCESS);
818 	}
819 
820 	/* build new pkthdr1 from new IXL tag/sync bits */
821 	uvp->pkthdr1 = (uvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
822 	    (new_set_tagsync_ixlp->tag << DESC_PKT_TAG_SHIFT) |
823 	    (uvp->ctxtp->isochan << DESC_PKT_CHAN_SHIFT) |
824 	    (new_set_tagsync_ixlp->sync << DESC_PKT_SY_SHIFT);
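	/*
	 * For example (illustrative values): with isospd 0 (S100), tag 1,
	 * isochan 3 and sync 0, the quadlet built above is
	 *
	 *	(0 << DESC_PKT_SPD_SHIFT) | (1 << DESC_PKT_TAG_SHIFT) |
	 *	    (3 << DESC_PKT_CHAN_SHIFT) | (0 << DESC_PKT_SY_SHIFT)
	 *
	 * i.e. the spd/tag/chan/sy fields of the isochronous transmit
	 * packet header, in the layout the DESC_PKT_* shifts define.
	 */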
825 
826 	/*
827 	 * get Z bits (# of descriptor components in descriptor block) from
828 	 * any dma bound address in the xfer_ctl struct of the IXL xfer cmd
829 	 */
830 	if ((xferctlp =	(hci1394_xfer_ctl_t *)
831 	    uvp->ixlxferp->compiler_privatep) == NULL) {
832 
833 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
834 
835 		return (IXL_PREP_FAILURE);
836 	}
837 	uvp->hdr_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
838 
839 	/*
840 	 * determine hdr_offset from the current(last) descriptor of the
841 	 * DMA descriptor block to the descriptor where pkthdr1 goes
842 	 * by examining IXL xfer command
843 	 */
844 	if (uvp->ixlxferp->ixl_opcode == IXL1394_OP_SEND_HDR_ONLY) {
845 		/*
846 		 * if IXL send header only, the current (last)
847 		 * descriptor is the one
848 		 */
849 		uvp->hdr_offset = 0;
850 	} else {
851 		/*
852 		 * all others are the first descriptor (Z bits - 1)
853 		 * back from the last
854 		 */
855 		uvp->hdr_offset -= 1;
856 	}
857 
858 	/* set depth to zero and count to update all dma descriptors */
859 	uvp->ixldepth = 0;
860 	uvp->ixlcount = xferctlp->cnt;
861 
862 	return (IXL_PREP_READY);
863 }
864 
865 /*
866  * hci1394_ixl_update_prep_recv_pkt()
867  *    Preparation for update of an IXL1394_OP_RECV_PKT_U or
868  *    IXL1394_OP_RECV_PKT_ST_U command.
869  */
870 static int
871 hci1394_ixl_update_prep_recv_pkt(hci1394_ixl_update_vars_t *uvp)
872 {
873 	ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
874 	ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
875 	hci1394_xfer_ctl_t *xferctlp;
876 	hci1394_desc_t	   *hcidescp;
877 	ddi_acc_handle_t   acc_hdl;
878 	ddi_dma_handle_t   dma_hdl;
879 	uint32_t	   desc_hdr;
880 	int		   err;
881 
882 	old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
883 	new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
884 
885 	/* check if any change between new and old IXL xfer commands */
886 	if ((new_xfer_pkt_ixlp->size == old_xfer_pkt_ixlp->size) &&
887 	    (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
888 	    old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
889 	    (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {
890 		/* no change. return with done ok status */
891 		return (IXL_PREP_SUCCESS);
892 	}
893 
894 	/* if new IXL buffer addrs are null, return error */
895 	if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == 0) ||
896 	    (new_xfer_pkt_ixlp->mem_bufp == NULL)) {
897 
898 		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
899 
900 		return (IXL_PREP_FAILURE);
901 	}
902 
903 	/* if IXL xfer command is not xfer start command */
904 	if (uvp->ixl_opcode == IXL1394_OP_RECV_PKT_U) {
905 		/*
906 		 * find IXL xfer start command in the compiler_privatep of the
907 		 * old IXL xfer command
908 		 */
909 		uvp->ixlxferp = (ixl1394_command_t *)
910 		    uvp->ixloldp->compiler_privatep;
911 
912 		if (uvp->ixlxferp == NULL) {
913 			/* Error - no IXL xfer start command found */
914 			uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
915 			return (IXL_PREP_FAILURE);
916 		}
917 	} else {
918 		/* IXL xfer command is the IXL xfer start command */
919 		uvp->ixlxferp = uvp->ixloldp;
920 	}
921 
922 	/* check that xfer_ctl is present in the IXL xfer start command */
923 	if ((xferctlp = (hci1394_xfer_ctl_t *)
924 	    uvp->ixlxferp->compiler_privatep) == NULL) {
925 		/* Error - no xfer_ctl struct found */
926 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
927 		return (IXL_PREP_FAILURE);
928 	}
929 
930 	/* set depth to zero and count to 1 to update dma descriptor */
931 	uvp->ixldepth = 0;
932 	uvp->ixlcount = 1;
933 
934 	/*
935 	 * get Z bits (number of descriptors in descriptor block) from the DMA
936 	 * bound address in the xfer_ctl struct of the IXL xfer start command.
937 	 */
938 	uvp->hci_offset = xferctlp->dma[0].dma_bound & DESC_Z_MASK;
939 
940 	/*
941 	 * set offset from the current(last) descriptor to the descriptor for
942 	 * this packet command
943 	 */
944 	uvp->hci_offset -= (1 + uvp->ixloldp->compiler_resv);
945 
946 	/*
947 	 * set bufsize to the new IXL xfer size, and bufaddr to the new
948 	 * IXL xfer bufp
949 	 */
950 	uvp->bufsize = ((ixl1394_xfer_pkt_t *)uvp->ixlnewp)->size;
951 	uvp->bufaddr = ((ixl1394_xfer_pkt_t *)
952 	    uvp->ixlnewp)->ixl_buf.ixldmac_addr;
953 
954 	/*
955 	 * update old hcihdr w/new bufsize, set hcistatus rescnt to
956 	 * new bufsize
957 	 */
958 	hcidescp = (hci1394_desc_t *)xferctlp->dma[0].dma_descp -
959 	    uvp->hci_offset;
960 	acc_hdl  = xferctlp->dma[0].dma_buf->bi_handle;
961 	dma_hdl  = xferctlp->dma[0].dma_buf->bi_dma_handle;
962 
963 	/* Sync the descriptor before we grab the header(s) */
964 	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp, sizeof (hci1394_desc_t),
965 	    DDI_DMA_SYNC_FORCPU);
966 	if (err != DDI_SUCCESS) {
967 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
968 		return (IXL_PREP_FAILURE);
969 	}
970 
971 	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
972 	uvp->hcihdr = desc_hdr;
973 	uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
974 	uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
975 	    DESC_HDR_REQCOUNT_MASK;
976 	uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
977 	    DESC_ST_RESCOUNT_MASK;
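	/*
	 * Worked example of the header rework above (hypothetical sizes):
	 * growing a receive packet from 512 to 1024 bytes leaves every
	 * hcihdr bit except reqCount intact and primes resCount with the
	 * new size:
	 *
	 *	hcihdr    = (old_hdr & ~DESC_HDR_REQCOUNT_MASK) |
	 *	    ((1024 << DESC_HDR_REQCOUNT_SHIFT) &
	 *	    DESC_HDR_REQCOUNT_MASK);
	 *	hcistatus = (1024 << DESC_ST_RESCOUNT_SHIFT) &
	 *	    DESC_ST_RESCOUNT_MASK;
	 *
	 * resCount starts at the full new buffer size; the controller
	 * writes back the count of bytes not yet filled.
	 */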
978 
979 	return (IXL_PREP_READY);
980 }
981 
982 /*
983  * hci1394_ixl_update_prep_recv_buf()
984  *    Preparation for update of an IXL1394_OP_RECV_BUF_U command.
985  */
986 static int
987 hci1394_ixl_update_prep_recv_buf(hci1394_ixl_update_vars_t *uvp)
988 {
989 	ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
990 	ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
991 	hci1394_xfer_ctl_t *xferctlp;
992 
993 	old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
994 	new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
995 
996 	/* check if any change between new and old IXL xfer commands */
997 	if ((new_xfer_buf_ixlp->size ==	old_xfer_buf_ixlp->size) &&
998 	    (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
999 	    old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
1000 	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {
1001 
1002 		if (((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) != 0) ||
1003 		    (new_xfer_buf_ixlp->pkt_size ==
1004 		    old_xfer_buf_ixlp->pkt_size)) {
1005 			/* no change. return with done ok status */
1006 			return (IXL_PREP_SUCCESS);
1007 		}
1008 	}
1009 
1010 	/* if new IXL buffer addrs are null, return error */
1011 	if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == 0) ||
1012 	    (new_xfer_buf_ixlp->mem_bufp == NULL)) {
1013 		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
1014 		return (IXL_PREP_FAILURE);
1015 	}
1016 
1017 	/*
1018 	 * if not buffer fill mode, check that the new pkt_size > 0 and
1019 	 * new size/pkt_size doesn't change the count of dma descriptor
1020 	 * blocks required
1021 	 */
1022 	if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
1023 		if ((new_xfer_buf_ixlp->pkt_size == 0) ||
1024 		    ((new_xfer_buf_ixlp->size /	new_xfer_buf_ixlp->pkt_size) !=
1025 		    (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {
1026 			/* count changes. return an error */
1027 			uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;
1028 			return (IXL_PREP_FAILURE);
1029 		}
1030 	}
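	/*
	 * Example of the invariant checked above (hypothetical numbers):
	 * an old 4096-byte buffer at 1024 bytes per packet compiled into
	 * 4096/1024 = 4 descriptor blocks, so a new 2048-byte buffer at
	 * 512 bytes per packet (also 4 blocks) is updatable in place,
	 * while 4096 at 512 (8 blocks) fails with
	 * IXL1394_EXFER_BUF_CNT_DIFF since an update cannot add or remove
	 * descriptor blocks.
	 */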
1031 
1032 	/* set old IXL xfer command as the current IXL xfer command */
1033 	uvp->ixlxferp = uvp->ixloldp;
1034 
1035 	/* check that the xfer_ctl struct is present in IXL xfer command */
1036 	if ((xferctlp = (hci1394_xfer_ctl_t *)uvp->ixlxferp->compiler_privatep)
1037 	    == NULL) {
1038 		/* return an error if no xfer_ctl struct is found for command */
1039 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1040 		return (IXL_PREP_FAILURE);
1041 	}
1042 
1043 	/* set depth to zero and count to update all dma descriptors */
1044 	uvp->ixldepth = 0;
1045 	uvp->ixlcount = xferctlp->cnt;
1046 
1047 	/* set bufsize to new pkt_size (or to new size if buffer fill mode) */
1048 	if ((uvp->ctxtp->ctxt_flags & HCI1394_ISO_CTXT_BFFILL) == 0) {
1049 		uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
1050 	} else {
1051 		uvp->bufsize = new_xfer_buf_ixlp->size;
1052 	}
1053 
1054 	/* set bufaddr to new ixl_buf */
1055 	uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1056 
1057 	/* set hcihdr reqcnt and hcistatus rescnt to new bufsize */
1058 	uvp->hci_offset = 0;
1059 	uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
1060 	    DESC_HDR_REQCOUNT_MASK;
1061 	uvp->hcistatus = (uvp->bufsize << DESC_ST_RESCOUNT_SHIFT) &
1062 	    DESC_ST_RESCOUNT_MASK;
1063 
1064 	return (IXL_PREP_READY);
1065 }
1066 
1067 /*
1068  * hci1394_ixl_update_prep_send_pkt()
1069  *    Preparation for update of an IXL1394_OP_SEND_PKT_U command,
1070  *    IXL1394_OP_SEND_PKT_ST_U command and IXL1394_OP_SEND_PKT_WHDR_ST_U
1071  *    command.
1072  */
1073 static int
1074 hci1394_ixl_update_prep_send_pkt(hci1394_ixl_update_vars_t *uvp)
1075 {
1076 	ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
1077 	ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
1078 	hci1394_xfer_ctl_t *xferctlp;
1079 	hci1394_desc_imm_t *hcidescp;
1080 	ddi_acc_handle_t   acc_hdl;
1081 	ddi_dma_handle_t   dma_hdl;
1082 	uint32_t	   desc_hdr, desc_hdr2;
1083 	int		   err;
1084 
1085 	old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
1086 	new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
1087 
1088 	/* check if any change between new and old IXL xfer commands */
1089 	if ((new_xfer_pkt_ixlp->size ==	old_xfer_pkt_ixlp->size) &&
1090 	    (new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr ==
1091 	    old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr) &&
1092 	    (new_xfer_pkt_ixlp->mem_bufp == old_xfer_pkt_ixlp->mem_bufp)) {
1093 
1094 		/* if none, return with done ok status */
1095 		return (IXL_PREP_SUCCESS);
1096 	}
1097 
1098 	/* if new ixl buffer addrs are null, return error */
1099 	if ((new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr == 0) ||
1100 	    (new_xfer_pkt_ixlp->mem_bufp == NULL)) {
1101 
1102 		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
1103 
1104 		return (IXL_PREP_FAILURE);
1105 	}
1106 
1107 	/* error if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode and size < 4 */
1108 	if ((uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) &&
1109 	    (new_xfer_pkt_ixlp->size < 4)) {
1110 
1111 		uvp->upd_status = IXL1394_EPKT_HDR_MISSING;
1112 
1113 		return (IXL_PREP_FAILURE);
1114 	}
1115 
1116 	/* if IXL xfer command is not an IXL xfer start command */
1117 	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_U) {
1118 		/*
1119 		 * find IXL xfer start command in the compiler_privatep of the
1120 		 * old IXL xfer command
1121 		 */
1122 		uvp->ixlxferp = (ixl1394_command_t *)
1123 		    old_xfer_pkt_ixlp->compiler_privatep;
1124 
1125 		if (uvp->ixlxferp == NULL) {
1126 			/* error if no IXL xfer start command found */
1127 			uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1128 
1129 			return (IXL_PREP_FAILURE);
1130 		}
1131 	} else {
1132 		/* IXL xfer command is the IXL xfer start command */
1133 		uvp->ixlxferp = uvp->ixloldp;
1134 	}
1135 
1136 	/*
1137 	 * get Z bits (number of descriptor components in the descriptor block)
1138 	 * from a dma bound address in the xfer_ctl structure of the IXL
1139 	 * xfer start command
1140 	 */
1141 	if ((xferctlp = (hci1394_xfer_ctl_t *)
1142 	    uvp->ixlxferp->compiler_privatep) == NULL) {
1143 
1144 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1145 
1146 		return (IXL_PREP_FAILURE);
1147 	}
1148 
1149 	/* set depth to zero and count to 1 to update dma descriptor */
1150 	uvp->ixldepth = 0;
1151 	uvp->ixlcount = 1;
1152 
1153 	/*
1154 	 * set offset to the header(first) descriptor from the
1155 	 * current(last) descriptor
1156 	 */
1157 	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;
1158 
1159 	/*
1160 	 * set offset from the current(last) descriptor to the descriptor for
1161 	 * this packet command
1162 	 */
1163 	uvp->hci_offset = uvp->hdr_offset - 2 - uvp->ixloldp->compiler_resv;
1164 
1165 	/* set bufsize to new pkt buffer size, set bufaddr to new bufp */
1166 	uvp->bufsize = new_xfer_pkt_ixlp->size;
1167 	uvp->bufaddr = new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
1168 
1169 	/*
1170 	 * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff,
1171 	 * step over hdr
1172 	 */
1173 	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
1174 		uvp->bufsize -= 4;
1175 		uvp->bufaddr += 4;
1176 	}
1177 
1178 	/* update old hcihdr w/new bufsize */
1179 	hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
1180 	    uvp->hci_offset;
1181 	acc_hdl  = xferctlp->dma[0].dma_buf->bi_handle;
1182 	dma_hdl  = xferctlp->dma[0].dma_buf->bi_dma_handle;
1183 
1184 	/* Sync the descriptor before we grab the header(s) */
1185 	err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1186 	    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1187 	if (err != DDI_SUCCESS) {
1188 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1189 
1190 		return (IXL_PREP_FAILURE);
1191 	}
1192 
1193 	desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1194 	uvp->hcihdr = desc_hdr;
1195 	uvp->hcihdr &= ~DESC_HDR_REQCOUNT_MASK;
1196 	uvp->hcihdr |= (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
1197 	    DESC_HDR_REQCOUNT_MASK;
1198 
1199 	/* update old pkthdr2 w/new bufsize; error if 16-bit datalen overflows */
1200 	desc_hdr2 = ddi_get32(acc_hdl, &hcidescp->q2);
1201 	uvp->pkthdr2 = desc_hdr2;
1202 	uvp->pkthdr2 = (uvp->pkthdr2 & DESC_PKT_DATALEN_MASK) >>
1203 	    DESC_PKT_DATALEN_SHIFT;
1204 	uvp->pkthdr2 -= old_xfer_pkt_ixlp->size;
1205 	uvp->pkthdr2 += uvp->bufsize;
1206 
1207 	if (uvp->pkthdr2 > 0xFFFF) {
1208 		uvp->upd_status = IXL1394_EPKTSIZE_MAX_OFLO;
1209 
1210 		return (IXL_PREP_FAILURE);
1211 	}
1212 	uvp->pkthdr2 = (uvp->pkthdr2 << DESC_PKT_DATALEN_SHIFT) &
1213 	    DESC_PKT_DATALEN_MASK;
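	/*
	 * Numeric sketch of the dataLength rework above (hypothetical
	 * sizes, non-WHDR case where bufsize equals the new IXL size): if
	 * the immediate header's current dataLength is 1000, the old IXL
	 * size was 600 and the new size is 400, the code computes
	 *
	 *	1000 - 600 + 400 = 800
	 *
	 * preserving any dataLength contribution other than this command's
	 * old size.  A result above 0xFFFF no longer fits the 16-bit
	 * dataLength field and fails with IXL1394_EPKTSIZE_MAX_OFLO.
	 */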
1214 
1215 	return (IXL_PREP_READY);
1216 }
1217 
1218 /*
1219  * hci1394_ixl_update_prep_send_buf()
1220  *    Preparation for update of an IXL1394_OP_SEND_BUF_U command.
1221  */
1222 static int
1223 hci1394_ixl_update_prep_send_buf(hci1394_ixl_update_vars_t *uvp)
1224 {
1225 	ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1226 	ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1227 	hci1394_xfer_ctl_t *xferctlp;
1228 
1229 	old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1230 	new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1231 
1232 	/* check if any change between new and old IXL xfer commands */
1233 	if ((new_xfer_buf_ixlp->size == old_xfer_buf_ixlp->size) &&
1234 	    (new_xfer_buf_ixlp->pkt_size == old_xfer_buf_ixlp->pkt_size) &&
1235 	    (new_xfer_buf_ixlp->ixl_buf.ixldmac_addr ==
1236 	    old_xfer_buf_ixlp->ixl_buf.ixldmac_addr) &&
1237 	    (new_xfer_buf_ixlp->mem_bufp == old_xfer_buf_ixlp->mem_bufp)) {
1238 		/* no change, return with done ok status */
1239 		return (IXL_PREP_SUCCESS);
1240 	}
1241 
1242 	/* if new IXL buffer addresses are null, return error */
1243 	if ((new_xfer_buf_ixlp->ixl_buf.ixldmac_addr == 0) ||
1244 	    (new_xfer_buf_ixlp->mem_bufp == NULL)) {
1245 
1246 		uvp->upd_status = IXL1394_EXFER_BUF_MISSING;
1247 
1248 		return (IXL_PREP_FAILURE);
1249 	}
1250 
1251 	/*
1252 	 * check that the new pkt_size > 0 and the new size/pkt_size
1253 	 * doesn't change the count of DMA descriptor blocks required
1254 	 */
1255 	if ((new_xfer_buf_ixlp->pkt_size == 0) ||
1256 	    ((new_xfer_buf_ixlp->size / new_xfer_buf_ixlp->pkt_size) !=
1257 	    (old_xfer_buf_ixlp->size / old_xfer_buf_ixlp->pkt_size))) {
1258 
1259 		/* Error - new has different pkt count than old */
1260 		uvp->upd_status = IXL1394_EXFER_BUF_CNT_DIFF;
1261 
1262 		return (IXL_PREP_FAILURE);
1263 	}
1264 
1265 	/* set the old IXL xfer command as the current IXL xfer command */
1266 	uvp->ixlxferp = uvp->ixloldp;
1267 
1268 	/*
1269 	 * get Z bits (number of descriptor components in descriptor block)
1270 	 * from a DMA bound address in the xfer_ctl struct of the
1271 	 * IXL xfer command
1272 	 */
1273 	if ((xferctlp = (hci1394_xfer_ctl_t *)
1274 	    uvp->ixlxferp->compiler_privatep) == NULL) {
1275 
1276 		uvp->upd_status = IXL1394_EORIG_IXL_CORRUPTED;
1277 
1278 		return (IXL_PREP_FAILURE);
1279 	}
1280 
1281 	/* set depth to zero and count to update all dma descriptors */
1282 	uvp->ixldepth = 0;
1283 	uvp->ixlcount = xferctlp->cnt;
1284 
1285 	/*
1286 	 * set offset to the header(first) descriptor from the current (last)
1287 	 * descriptor.
1288 	 */
1289 	uvp->hdr_offset = (xferctlp->dma[0].dma_bound & DESC_Z_MASK) - 1;
1290 
1291 	/* set offset to the only(last) xfer descriptor */
1292 	uvp->hci_offset = 0;
1293 
1294 	/* set bufsize to the new pkt_size, set bufaddr to the new bufp */
1295 	uvp->bufsize = new_xfer_buf_ixlp->pkt_size;
1296 	uvp->bufaddr = new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1297 
1298 	/*
1299 	 * if IXL1394_OP_SEND_PKT_WHDR_ST_U opcode, adjust size & buff,
1300 	 * step over header (a quadlet)
1301 	 */
1302 	if (uvp->ixl_opcode == IXL1394_OP_SEND_PKT_WHDR_ST_U) {
1303 		uvp->bufsize -= 4;
1304 		uvp->bufaddr += 4;
1305 	}
1306 
1307 	/* set hcihdr to new bufsize */
1308 	uvp->hcihdr = (uvp->bufsize << DESC_HDR_REQCOUNT_SHIFT) &
1309 	    DESC_HDR_REQCOUNT_MASK;
1310 
1311 	/* set pkthdr2 to new bufsize */
1312 	uvp->pkthdr2 = (uvp->bufsize << DESC_PKT_DATALEN_SHIFT) &
1313 	    DESC_PKT_DATALEN_MASK;
1314 
1315 	return (IXL_PREP_READY);
1316 }
1317 
1318 /*
1319  * hci1394_ixl_update_perform()
1320  *    performs the actual update into DMA memory.
1321  */
1322 static int
1323 hci1394_ixl_update_perform(hci1394_ixl_update_vars_t *uvp)
1324 {
1325 	int			ii;
1326 	uint_t			skipaddrlast;
1327 	hci1394_xfer_ctl_t	*xferctlp;
1328 	hci1394_desc_imm_t	*hcidescp;
1329 	hci1394_iso_ctxt_t	*ctxtp;
1330 	ddi_acc_handle_t	acc_hdl;
1331 	ddi_dma_handle_t	dma_hdl;
1332 	int			err;
1333 
1334 	ctxtp = uvp->ctxtp;
1335 
1336 	/*
1337 	 * if no target ixl xfer command to be updated or it has
1338 	 * no xfer_ctl struct, then internal error.
1339 	 */
1340 	if ((uvp->ixlxferp == NULL) ||
1341 	    ((xferctlp = (hci1394_xfer_ctl_t *)
1342 	    uvp->ixlxferp->compiler_privatep) == NULL)) {
1343 
1344 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1345 
1346 		return (DDI_FAILURE);
1347 	}
1348 
1349 	/* perform update based on specific ixl command type */
1350 	switch (uvp->ixl_opcode) {
1351 
1352 	case IXL1394_OP_JUMP_U: {
1353 		ixl1394_jump_t *old_jump_ixlp;
1354 		ixl1394_jump_t *new_jump_ixlp;
1355 
1356 		old_jump_ixlp = (ixl1394_jump_t *)uvp->ixloldp;
1357 		new_jump_ixlp = (ixl1394_jump_t *)uvp->ixlnewp;
1358 
1359 		/*
1360 		 * set new hdr and new branch fields into last component of last
1361 		 * dma descriptor block of ixl xfer cmd associated with
1362 		 * ixl jump cmd
1363 		 */
1364 		hcidescp = (hci1394_desc_imm_t *)
1365 		    xferctlp->dma[xferctlp->cnt - 1].dma_descp;
1366 		acc_hdl	 = xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_handle;
1367 		dma_hdl	 =
1368 		    xferctlp->dma[xferctlp->cnt - 1].dma_buf->bi_dma_handle;
1369 
1370 		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
1371 		ddi_put32(acc_hdl, &hcidescp->branch, uvp->jumpaddr);
1372 
1373 		/*
1374 		 * if xfer type is send and skip mode is IXL1394_SKIP_TO_NEXT,
1375 		 * also set branch location into branch field of first
1376 		 * component (skip to address) of last dma descriptor block
1377 		 */
1378 		if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
1379 			hcidescp -= uvp->hci_offset;
1380 			ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
1381 		}
1382 
1383 		/* Sync descriptor for device (desc was modified) */
1384 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1385 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1386 		if (err != DDI_SUCCESS) {
1387 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1388 
1389 			return (DDI_FAILURE);
1390 		}
1391 
1392 		/* set old ixl jump cmd label from new ixl jump cmd label */
1393 		old_jump_ixlp->label = new_jump_ixlp->label;
1394 		break;
1395 	}
1396 	case IXL1394_OP_SET_SKIPMODE_U: {
1397 		ixl1394_set_skipmode_t *old_set_skipmode_ixlp;
1398 		ixl1394_set_skipmode_t *new_set_skipmode_ixlp;
1399 
1400 		old_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixloldp;
1401 		new_set_skipmode_ixlp = (ixl1394_set_skipmode_t *)uvp->ixlnewp;
1402 
1403 		/*
1404 		 * if skip to next mode, save skip addr for last iteration
1405 		 * thru dma descriptor blocks for associated ixl xfer command
1406 		 */
1407 		if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
1408 			skipaddrlast = uvp->skipaddr;
1409 		}
1410 
1411 		/*
1412 		 * iterate through the set of dma descriptor blocks for the
1413 		 * associated ixl xfer start cmd and set the new skip address
1414 		 * into the first hci descriptor of each; if skip-to-next or
1415 		 * skip-to-self, first determine the address in each iteration
1416 		 */
1417 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1418 			hcidescp = (hci1394_desc_imm_t *)
1419 			    xferctlp->dma[ii].dma_descp - uvp->hci_offset;
1420 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1421 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1422 
1423 			if (uvp->skipmode == IXL1394_SKIP_TO_NEXT) {
1424 				if (ii < (xferctlp->cnt - 1)) {
1425 					uvp->skipaddr =
1426 					    xferctlp->dma[ii + 1].dma_bound;
1427 				} else {
1428 					uvp->skipaddr = skipaddrlast;
1429 				}
1430 			} else if (uvp->skipmode == IXL1394_SKIP_TO_SELF) {
1431 				uvp->skipaddr = xferctlp->dma[ii].dma_bound;
1432 			}
1433 
1434 			ddi_put32(acc_hdl, &hcidescp->branch, uvp->skipaddr);
1435 
1436 			/* Sync descriptor for device (desc was modified) */
1437 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1438 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1439 			if (err != DDI_SUCCESS) {
1440 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1441 
1442 				return (DDI_FAILURE);
1443 			}
1444 		}
1445 
1446 		/*
1447 		 * set old ixl set skip mode cmd mode and label from new ixl cmd;
1448 		 * set old ixl set skip mode cmd compiler_privatep to
1449 		 * uvp->skipxferp
1450 		 */
1451 		old_set_skipmode_ixlp->skipmode = uvp->skipmode;
1452 		old_set_skipmode_ixlp->label = new_set_skipmode_ixlp->label;
1453 		old_set_skipmode_ixlp->compiler_privatep =
1454 		    (ixl1394_priv_t)uvp->skipxferp;
1455 		break;
1456 	}
1457 	case IXL1394_OP_SET_TAGSYNC_U: {
1458 		ixl1394_set_tagsync_t *old_set_tagsync_ixlp;
1459 		ixl1394_set_tagsync_t *new_set_tagsync_ixlp;
1460 
1461 		old_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixloldp;
1462 		new_set_tagsync_ixlp = (ixl1394_set_tagsync_t *)uvp->ixlnewp;
1463 
1464 		/*
1465 		 * iterate through set of descriptor blocks for associated IXL
1466 		 * xfer command and set new pkthdr1 value into output more/last
1467 		 * immediate hci descriptor (first/last hci descriptor of each
1468 		 * descriptor block)
1469 		 */
1470 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1471 			hcidescp = (hci1394_desc_imm_t *)
1472 			    xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
1473 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1474 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1475 			ddi_put32(acc_hdl, &hcidescp->q1, uvp->pkthdr1);
1476 
1477 			/* Sync descriptor for device (desc was modified) */
1478 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1479 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1480 			if (err != DDI_SUCCESS) {
1481 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1482 
1483 				return (DDI_FAILURE);
1484 			}
1485 		}
1486 
1487 		/*
1488 		 * set old ixl set tagsync cmd tag & sync from new ixl set
1489 		 * tagsync cmd
1490 		 */
1491 		old_set_tagsync_ixlp->tag = new_set_tagsync_ixlp->tag;
1492 		old_set_tagsync_ixlp->sync = new_set_tagsync_ixlp->sync;
1493 		break;
1494 	}
1495 	case IXL1394_OP_RECV_PKT_U:
1496 	case IXL1394_OP_RECV_PKT_ST_U: {
1497 		ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
1498 		ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
1499 		uint32_t	   desc_status;
1500 
1501 		old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
1502 		new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
1503 
1504 		/*
1505 		 * alter buffer address, count and rescount in ixl recv pkt cmd
1506 		 * related hci component in dma descriptor block
1507 		 */
1508 		hcidescp = (hci1394_desc_imm_t *)
1509 		    xferctlp->dma[0].dma_descp - uvp->hci_offset;
1510 		acc_hdl	 = xferctlp->dma[0].dma_buf->bi_handle;
1511 		dma_hdl	 = xferctlp->dma[0].dma_buf->bi_dma_handle;
1512 		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
1513 		ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1514 
1515 		/* Sync the descriptor before we grab the status */
1516 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1517 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1518 		if (err != DDI_SUCCESS) {
1519 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1520 
1521 			return (DDI_FAILURE);
1522 		}
1523 
1524 		/* change only low 1/2 word and leave status bits unchanged */
1525 		desc_status = ddi_get32(acc_hdl, &hcidescp->status);
1526 		desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
1527 		    uvp->hcistatus;
1528 		ddi_put32(acc_hdl, &hcidescp->status, desc_status);
1529 
1530 		/* Sync descriptor for device (desc was modified) */
1531 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1532 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1533 		if (err != DDI_SUCCESS) {
1534 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1535 
1536 			return (DDI_FAILURE);
1537 		}
1538 
1539 		/*
1540 		 * set old ixl recv pkt size and buffers from new
1541 		 * ixl recv pkt command
1542 		 */
1543 		old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
1544 		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
1545 		    new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
1546 		old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
1547 		break;
1548 	}
1549 	case IXL1394_OP_RECV_BUF_U: {
1550 		ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1551 		ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1552 		uint32_t	   desc_hdr;
1553 		uint32_t	   desc_status;
1554 
1555 		old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1556 		new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1557 
1558 		/*
1559 		 * iterate through set of descriptor blocks for this IXL xfer
1560 		 * command altering buffer, count and rescount in each
1561 		 * input more/last(the only) hci descriptor block descriptor.
1562 		 */
1563 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1564 
1565 			hcidescp = (hci1394_desc_imm_t *)
1566 			    xferctlp->dma[ii].dma_descp - uvp->hci_offset;
1567 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1568 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1569 
1570 			ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1571 
1572 			/*
1573 			 * advance to next buffer segment, adjust over header
1574 			 * if appropriate
1575 			 */
1576 			uvp->bufaddr += uvp->bufsize;
1577 
1578 			/* Sync the descriptor before we grab the header(s) */
1579 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1580 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1581 			if (err != DDI_SUCCESS) {
1582 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1583 
1584 				return (DDI_FAILURE);
1585 			}
1586 
1587 			/*
1588 			 * this preserves interrupt enable bits, et al. in each
1589 			 * descriptor block header.
1590 			 */
1591 			desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1592 			desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
1593 			    uvp->hcihdr;
1594 			ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);
1595 
1596 			/*
1597 			 * change only low 1/2 word leaving status bits
1598 			 * unchanged
1599 			 */
1600 			desc_status = ddi_get32(acc_hdl, &hcidescp->status);
1601 			desc_status = (desc_status & ~DESC_ST_RESCOUNT_MASK) |
1602 			    uvp->hcistatus;
1603 			ddi_put32(acc_hdl, &hcidescp->status, desc_status);
1604 
1605 			/* Sync descriptor for device (desc was modified) */
1606 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1607 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1608 			if (err != DDI_SUCCESS) {
1609 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1610 
1611 				return (DDI_FAILURE);
1612 			}
1613 		}
1614 
1615 		/*
1616 		 * set old ixl recv buf sizes and buffers from
1617 		 * new ixl recv buf cmd
1618 		 */
1619 		old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
1620 		old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
1621 		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
1622 		    new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1623 		old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
1624 		break;
1625 	}
1626 	case IXL1394_OP_SEND_PKT_U:
1627 	case IXL1394_OP_SEND_PKT_ST_U:
1628 	case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
1629 		ixl1394_xfer_pkt_t *old_xfer_pkt_ixlp;
1630 		ixl1394_xfer_pkt_t *new_xfer_pkt_ixlp;
1631 
1632 		old_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixloldp;
1633 		new_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)uvp->ixlnewp;
1634 
1635 		/*
1636 		 * replace pkthdr2 in output more immediate (the first) hci
1637 		 * descriptor in the block, then alter the buffer address and count
1638 		 * in the IXL send pkt command's related output more/last hci descriptor.
1639 		 */
1640 		hcidescp = (hci1394_desc_imm_t *)xferctlp->dma[0].dma_descp -
1641 		    uvp->hdr_offset;
1642 		acc_hdl	 = xferctlp->dma[0].dma_buf->bi_handle;
1643 		dma_hdl	 = xferctlp->dma[0].dma_buf->bi_dma_handle;
1644 
1645 		ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
1646 		ddi_put32(acc_hdl, &hcidescp->hdr, uvp->hcihdr);
1647 		ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1648 
1649 		/* Sync descriptor for device (desc was modified) */
1650 		err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1651 		    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1652 		if (err != DDI_SUCCESS) {
1653 			uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1654 
1655 			return (DDI_FAILURE);
1656 		}
1657 
1658 		/*
1659 		 * set old ixl send pkt size and buffers from
1660 		 * new ixl send pkt cmd
1661 		 */
1662 		old_xfer_pkt_ixlp->size = new_xfer_pkt_ixlp->size;
1663 		old_xfer_pkt_ixlp->ixl_buf.ixldmac_addr =
1664 		    new_xfer_pkt_ixlp->ixl_buf.ixldmac_addr;
1665 		old_xfer_pkt_ixlp->mem_bufp = new_xfer_pkt_ixlp->mem_bufp;
1666 		break;
1667 	}
1668 	case IXL1394_OP_SEND_BUF_U: {
1669 		ixl1394_xfer_buf_t *old_xfer_buf_ixlp;
1670 		ixl1394_xfer_buf_t *new_xfer_buf_ixlp;
1671 		uint32_t	   desc_hdr;
1672 
1673 		old_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixloldp;
1674 		new_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)uvp->ixlnewp;
1675 
1676 		/*
1677 		 * iterate through set of descriptor blocks for this IXL xfer
1678 		 * command, replacing pkthdr2 in the output more immediate
1679 		 * (the first) hci descriptor block descriptor, then altering
1680 		 * buffer address and count in each output last (the only other)
1681 		 * hci descriptor block descriptor.
1682 		 */
1683 		for (ii = 0; ii < xferctlp->cnt; ii++) {
1684 			hcidescp = (hci1394_desc_imm_t *)
1685 			    xferctlp->dma[ii].dma_descp - uvp->hdr_offset;
1686 			acc_hdl	 = xferctlp->dma[ii].dma_buf->bi_handle;
1687 			dma_hdl	 = xferctlp->dma[ii].dma_buf->bi_dma_handle;
1688 
1689 			ddi_put32(acc_hdl, &hcidescp->q2, uvp->pkthdr2);
1690 			ddi_put32(acc_hdl, &hcidescp->data_addr, uvp->bufaddr);
1691 
1692 			/* advance to next buffer segment */
1693 			uvp->bufaddr += uvp->bufsize;
1694 
1695 			/* Sync the descriptor before we grab the header(s) */
1696 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1697 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
1698 			if (err != DDI_SUCCESS) {
1699 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1700 
1701 				return (DDI_FAILURE);
1702 			}
1703 
1704 			/*
1705 			 * this preserves interrupt enable bits, et al.,
1706 			 * in each descriptor block header
1707 			 */
1708 			desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
1709 			desc_hdr = (desc_hdr & ~DESC_HDR_REQCOUNT_MASK) |
1710 			    uvp->hcihdr;
1711 			ddi_put32(acc_hdl, &hcidescp->hdr, desc_hdr);
1712 
1713 			/* Sync descriptor for device (desc was modified) */
1714 			err = ddi_dma_sync(dma_hdl, (off_t)hcidescp,
1715 			    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
1716 			if (err != DDI_SUCCESS) {
1717 				uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1718 
1719 				return (DDI_FAILURE);
1720 			}
1721 		}
1722 
1723 		/*
1724 		 * set old ixl send buf sizes and buffers from
1725 		 * new ixl send buf cmd
1726 		 */
1727 		old_xfer_buf_ixlp->pkt_size = new_xfer_buf_ixlp->pkt_size;
1728 		old_xfer_buf_ixlp->size = new_xfer_buf_ixlp->size;
1729 		old_xfer_buf_ixlp->ixl_buf.ixldmac_addr =
1730 		    new_xfer_buf_ixlp->ixl_buf.ixldmac_addr;
1731 		old_xfer_buf_ixlp->mem_bufp = new_xfer_buf_ixlp->mem_bufp;
1732 		break;
1733 	}
1734 	default:
1735 		/* ixl command being updated must be one of the above, else error */
1736 		uvp->upd_status = IXL1394_EINTERNAL_ERROR;
1737 
1738 		return (DDI_FAILURE);
1739 	}
1740 
1741 	/* hit the WAKE bit in the context control register */
1742 	if (ctxtp->ctxt_flags & HCI1394_ISO_CTXT_RECV) {
1743 		HCI1394_IRCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
1744 		    0, 0, 0, 0, 0, 1 /* wake */);
1745 	} else {
1746 		HCI1394_ITCTXT_CTRL_SET(uvp->soft_statep, ctxtp->ctxt_index,
1747 		    0, 0, 0, 1 /* wake */);
1748 	}
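
	/*
	 * Setting wake prompts the controller to re-fetch the descriptor at
	 * its current command pointer, so a context that had stalled (for
	 * example, on a descriptor branch address of 0) picks up the edits
	 * made above.
	 */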
1749 
1750 	/* perform update completed successfully */
1751 	return (DDI_SUCCESS);
1752 }
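
/*
 * Every case above edits a descriptor block that the controller may be
 * actively parsing, using the same idiom: sync the descriptor for the CPU,
 * read-modify-write only the field being replaced via ddi_get32()/
 * ddi_put32(), then sync it back for the device.  A minimal sketch of the
 * idiom, as an illustration only (hdr and new_cnt are hypothetical locals):
 *
 *	(void) ddi_dma_sync(dma_hdl, (off_t)hcidescp,
 *	    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORCPU);
 *	hdr = ddi_get32(acc_hdl, &hcidescp->hdr);
 *	hdr = (hdr & ~DESC_HDR_REQCOUNT_MASK) | new_cnt;
 *	ddi_put32(acc_hdl, &hcidescp->hdr, hdr);
 *	(void) ddi_dma_sync(dma_hdl, (off_t)hcidescp,
 *	    sizeof (hci1394_desc_imm_t), DDI_DMA_SYNC_FORDEV);
 *
 * The FORCPU sync before the read and the FORDEV sync after the write keep
 * the CPU and controller views of the descriptor coherent; masking with
 * ~DESC_HDR_REQCOUNT_MASK leaves the interrupt enable and other header bits
 * intact while the request count is replaced.
 */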
1753 
1754 /*
1755  * hci1394_ixl_update_evaluate()
1756  *    Evaluate where the hardware is in running through the DMA descriptor
1757  *    blocks.
1758  */
1759 static int
1760 hci1394_ixl_update_evaluate(hci1394_ixl_update_vars_t *uvp)
1761 {
1762 	hci1394_iso_ctxt_t	*ctxtp;
1763 	ixl1394_command_t	*ixlp;
1764 	int			ixldepth;
1765 	int			ii;
1766 
1767 	ctxtp = uvp->ctxtp;
1768 
1769 	ixlp = NULL;
1770 	ixldepth = 0xFFFFFFFF;
1771 
1772 	/*
1773 	 * repeat until the IXL execution status evaluation function returns an
1774 	 * error or until the pointer to the currently executing IXL command
1775 	 * and its depth stabilize
1776 	 */
1777 	while ((ixlp != ctxtp->ixl_execp) ||
1778 	    (ixldepth != ctxtp->ixl_exec_depth)) {
1779 
1780 		ixlp = ctxtp->ixl_execp;
1781 		ixldepth = ctxtp->ixl_exec_depth;
1782 
1783 		/*
1784 		 * call IXL execution status evaluation (ixl_dma_sync)
1785 		 * function, returning if it reports an error
1786 		 * (HCI1394_IXL_INTR_DMALOST is the only error condition).
1787 		 *
1788 		 * Note: interrupt processing function can only return one of
1789 		 * the following statuses here:
1790 		 *    HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP,
1791 		 *    HCI1394_IXL_INTR_DMALOST
1792 		 *
1793 		 * it cannot return the following status here:
1794 		 *    HCI1394_IXL_INTR_NOADV
1795 		 *
1796 		 * Don't need to grab the lock here... for the same reason
1797 		 * explained in hci1394_ixl_update_endup() above.
1798 		 */
1799 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
1800 		if (hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp) ==
1801 		    HCI1394_IXL_INTR_DMALOST) {
1802 
1803 			/* return post-perform update failed status */
1804 			uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;
1805 
1806 			return (DDI_FAILURE);
1807 		}
1808 	}
1809 
1810 	/*
1811 	 * if the currently executing IXL command is one of the IXL_MAX_LOCN
1812 	 * locations saved before update was performed, return update
1813 	 * successful status.
1814 	 */
1815 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
1816 		if ((uvp->locn_info[ii].ixlp == ixlp) &&
1817 		    (uvp->locn_info[ii].ixldepth == ixldepth)) {
1818 
1819 			return (DDI_SUCCESS);
1820 		}
1821 	}
1822 
1823 	/*
1824 	 * else return post-perform update failed status.
1825 	 * Note: more sophisticated evaluations about where execution went,
1826 	 * and whether the update has really failed, can be made later.
1827 	 */
1828 	uvp->upd_status = IXL1394_EPOST_UPD_DMALOST;
1829 
1830 	return (DDI_FAILURE);
1831 }
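
/*
 * A minimal sketch (hypothetical helper, illustration only) of the success
 * test applied above: once the executing location has stabilized, the
 * update is judged to have held if that (command, depth) pair matches one
 * of the IXL_MAX_LOCN locations captured before the update was performed.
 *
 *	static int
 *	hci1394_ixl_locn_matches(hci1394_ixl_update_vars_t *uvp,
 *	    ixl1394_command_t *ixlp, int ixldepth)
 *	{
 *		int ii;
 *
 *		for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
 *			if ((uvp->locn_info[ii].ixlp == ixlp) &&
 *			    (uvp->locn_info[ii].ixldepth == ixldepth))
 *				return (1);
 *		}
 *		return (0);
 *	}
 */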
1832 
1833 /*
1834  * hci1394_ixl_update_analysis()
1835  *    Determine if the hardware is within the range we expected it to be.
1836  *    If so the update succeeded.
1837  */
1838 static int
1839 hci1394_ixl_update_analysis(hci1394_ixl_update_vars_t *uvp)
1840 {
1841 	hci1394_iso_ctxt_t	*ctxtp;
1842 	ixl1394_command_t	*ixlp;
1843 	int			ixldepth;
1844 	int			ii;
1845 	int			status;
1846 
1847 	ctxtp = uvp->ctxtp;
1848 
1849 	ixlp = NULL;
1850 	ixldepth = 0xFFFFFFFF;
1851 
1852 	/*
1853 	 * repeat until the ixl execution status evaluation function returns an
1854 	 * error or until the pointer to the currently executing ixl command
1855 	 * and its depth stabilize.
1856 	 */
1857 	while ((ixlp != ctxtp->ixl_execp) ||
1858 	    (ixldepth != ctxtp->ixl_exec_depth)) {
1859 
1860 		ixlp = ctxtp->ixl_execp;
1861 		ixldepth = ctxtp->ixl_exec_depth;
1862 
1863 		/*
1864 		 * call ixl execution status evaluation (interrupt processing).
1865 		 * set IXL1394_EPRE_UPD_DMALOST if the status is
1866 		 * HCI1394_IXL_INTR_DMALOST and return.
1867 		 *
1868 		 * Note: interrupt processing function can only return one of
1869 		 * the following statuses here:
1870 		 *    HCI1394_IXL_INTR_NOERROR, HCI1394_IXL_INTR_DMASTOP or
1871 		 *    HCI1394_IXL_INTR_DMALOST
1872 		 *
1873 		 * it cannot return the following status here:
1874 		 *    HCI1394_IXL_INTR_NOADV
1875 		 *
1876 		 * Don't need to grab the lock here... for the same reason
1877 		 * explained in hci1394_ixl_update_endup() above.
1878 		 */
1879 		ctxtp->intr_flags &= ~HCI1394_ISO_CTXT_INTRSET;
1880 
1881 		status = hci1394_ixl_dma_sync(uvp->soft_statep, ctxtp);
1882 		if (status == HCI1394_IXL_INTR_DMALOST) {
1883 			/*
1884 			 * set pre-update dma processing lost status and
1885 			 * return error
1886 			 */
1887 			uvp->upd_status = IXL1394_EPRE_UPD_DMALOST;
1888 
1889 			return (DDI_FAILURE);
1890 		}
1891 	}
1892 
1893 	/*
1894 	 * save the locations of the currently executing ixl command and the
1895 	 * IXL_MAX_LOCN-1 commands following it.
1896 	 */
1897 	hci1394_ixl_update_set_locn_info(uvp);
1898 
1899 	/*
1900 	 * if the xfer_ixl_cmd associated with the IXL command being updated
1901 	 * is one of the saved (currently executing) IXL commands, the risk of
1902 	 * performing the update now is too great; set the
1903 	 * IXL1394_ERISK_PROHIBITS_UPD status and return an error.
1904 	 *
1905 	 * Note: more sophisticated risk override evaluations and processing
1906 	 * can be implemented later.
1907 	 */
1908 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
1909 
1910 		if ((uvp->locn_info[ii].ixlp == uvp->ixlxferp) &&
1911 		    (uvp->locn_info[ii].ixldepth >= uvp->ixldepth) &&
1912 		    (uvp->locn_info[ii].ixldepth <
1913 		    (uvp->ixldepth + uvp->ixlcount))) {
1914 
1915 			uvp->upd_status = IXL1394_ERISK_PROHIBITS_UPD;
1916 
1917 			return (DDI_FAILURE);
1918 		}
1919 	}
1920 
1921 	/* it is safe to perform the update, return ok status */
1922 	return (DDI_SUCCESS);
1923 }
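
/*
 * To make the risk test above concrete (illustration only): an update is
 * vetoed when a captured execution location lands on the xfer command being
 * updated within the half-open depth window
 * [uvp->ixldepth, uvp->ixldepth + uvp->ixlcount).  For example, if the
 * command being updated spans depths 2 through 4 (ixldepth == 2,
 * ixlcount == 3) and the hardware is executing depth 3 of that same
 * command, the update is refused with IXL1394_ERISK_PROHIBITS_UPD.
 */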
1924 
1925 /*
1926  * hci1394_ixl_update_set_locn_info()
1927  *    set up the local list of the IXL_MAX_LOCN next commandPtr locations we
1928  *    expect the hardware to get to in the next 125 microseconds.
1929  */
1930 static void
1931 hci1394_ixl_update_set_locn_info(hci1394_ixl_update_vars_t *uvp)
1932 {
1933 	hci1394_iso_ctxt_t	*ctxtp;
1934 	ixl1394_command_t	*ixlp;
1935 	int			ixldepth;
1936 	int			ii;
1937 
1938 	/*
1939 	 * find next xfer start ixl command, starting with current ixl command
1940 	 * where execution last left off
1941 	 */
1942 	ctxtp = uvp->ctxtp;
1943 
1944 	ixldepth = ctxtp->ixl_exec_depth;
1945 	(void) hci1394_ixl_find_next_exec_xfer(ctxtp->ixl_execp, NULL, &ixlp);
1946 
1947 	/*
1948 	 * if the current IXL command wasn't an xfer start command, then reset
1949 	 * the depth to 0 for the xfer command found
1950 	 */
1951 	if (ixlp != ctxtp->ixl_execp)
1952 		ixldepth = 0;
1953 
1954 	/*
1955 	 * save the xfer start IXL command and its depth, and also save the
1956 	 * location and depth of the next IXL_MAX_LOCN-1 xfer start IXL
1957 	 * commands following it (if any)
1958 	 */
1959 	for (ii = 0; ii < IXL_MAX_LOCN; ii++) {
1960 		uvp->locn_info[ii].ixlp = ixlp;
1961 		uvp->locn_info[ii].ixldepth = ixldepth;
1962 
1963 		if (ixlp) {
1964 			/*
1965 			 * if more dma commands generated by this xfer command
1966 			 * still follow, use them; otherwise, find the next xfer
1967 			 * start IXL command and set its depth to 0.
1968 			 */
1969 			if (++ixldepth >= ((hci1394_xfer_ctl_t *)
1970 			    ixlp->compiler_privatep)->cnt) {
1971 
1972 				(void) hci1394_ixl_find_next_exec_xfer(
1973 				    ixlp->next_ixlp, NULL, &ixlp);
1974 				ixldepth = 0;
1975 			}
1976 		}
1977 	}
1978 }
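
/*
 * Worked example for the loop above (illustration only, assuming
 * IXL_MAX_LOCN is 4): if execution last left off at depth 2 of xfer start
 * command A, whose xfer_ctl holds 3 descriptor blocks (cnt == 3), and A is
 * followed by xfer start commands B (cnt == 1) and C (cnt == 2), then the
 * saved locations are:
 *
 *	locn_info[0] = { A, 2 }
 *	locn_info[1] = { B, 0 }
 *	locn_info[2] = { C, 0 }
 *	locn_info[3] = { C, 1 }
 */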
1979