1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * hci1394_ixl_comp.c
31  *    Isochronous IXL Compiler.
32  *    The compiler converts the general hardware independent IXL command
33  *    blocks into OpenHCI DMA descriptors.
34  */
35 
36 #include <sys/kmem.h>
37 #include <sys/types.h>
38 #include <sys/conf.h>
39 #include <sys/ddi.h>
40 #include <sys/sunddi.h>
41 
42 #include <sys/tnf_probe.h>
43 
44 #include <sys/1394/h1394.h>
45 #include <sys/1394/ixl1394.h>
46 #include <sys/1394/adapters/hci1394.h>
47 
48 /* compiler allocation size for DMA descriptors; 8000 bytes = 500 16-byte descriptors */
49 #define	HCI1394_IXL_PAGESIZE	8000
50 
51 /* invalid opcode */
52 #define	IXL1394_OP_INVALID  (0 | IXL1394_OPTY_OTHER)
53 
54 /*
55  * maximum number of interrupts permitted for a single context in which
56  * the context does not advance to the next DMA descriptor.  Interrupts are
57  * triggered by 1) hardware completing a DMA descriptor block which has the
58  * interrupt (i) bits set, 2) a cycle_inconsistent interrupt, or 3) a cycle_lost
59  * interrupt.  Once the max is reached, the HCI1394_IXL_INTR_NOADV error is
60  * returned.
61  */
62 int hci1394_ixl_max_noadv_intrs = 8;
63 
64 
65 static void hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
66     hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
67     ixl1394_command_t *ixlp);
68 static void hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp);
69 static void hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp,
70     ixl1394_command_t *ixlp);
71 static void hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
72 static void hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
73 static void hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
74 static void hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp);
75 static void hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp);
76 static void hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
77 static void hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp);
78 static void hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp);
79 static int hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp,
80     caddr_t *dma_descpp, uint32_t *dma_desc_bound);
81 static void hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp);
82 static void hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp);
83 static void hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp);
84 static int hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp,
85     uint32_t bufp, uint16_t size);
86 static int hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp,
87     uint32_t count);
88 static int hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp);
89 static uint32_t hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp);
90 static hci1394_xfer_ctl_t *hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp,
91     uint32_t dmacnt);
92 static void *hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp,
93     uint32_t size, uint32_t *dma_bound);
94 static boolean_t hci1394_is_opcode_valid(uint16_t ixlopcode);
95 
96 
97 /*
98  * FULL LIST OF ACCEPTED IXL COMMAND OPCODES:
99  * Receive Only:			Transmit Only:
100  *    IXL1394_OP_RECV_PKT_ST		    IXL1394_OP_SEND_PKT_WHDR_ST
101  *    IXL1394_OP_RECV_PKT		    IXL1394_OP_SEND_PKT_ST
102  *    IXL1394_OP_RECV_BUF		    IXL1394_OP_SEND_PKT
103  *    IXL1394_OP_SET_SYNCWAIT		    IXL1394_OP_SEND_BUF
104  *					    IXL1394_OP_SEND_HDR_ONLY
105  * Receive or Transmit:			    IXL1394_OP_SEND_NO_PKT
106  *    IXL1394_OP_CALLBACK		    IXL1394_OP_SET_TAGSYNC
107  *    IXL1394_OP_LABEL			    IXL1394_OP_SET_SKIPMODE
108  *    IXL1394_OP_JUMP			    IXL1394_OP_STORE_TIMESTAMP
109  */
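/*
 * Illustrative sketch (not from the original source): a minimal
 * receive-mode IXL program, chained through each command's next_ixlp
 * field, might look like
 *
 *    IXL1394_OP_LABEL          <---------------+
 *    IXL1394_OP_RECV_PKT_ST    (1st pkt frag)  |
 *    IXL1394_OP_RECV_PKT       (more frags)    |
 *    IXL1394_OP_CALLBACK       (notify client) |
 *    IXL1394_OP_JUMP           ----------------+
 *
 * The first compiler pass below turns the RECV_PKT_ST/RECV_PKT pair into
 * one DMA descriptor block; the second pass resolves the JUMP back to the
 * LABEL and, because a CALLBACK lies on the execution path, sets the
 * interrupt enable bit in the block's last descriptor.
 */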
110 
111 /*
112  * hci1394_compile_ixl()
113  *    Top level ixl compiler entry point.  Scans ixl and builds openHCI 1.0
114  *    descriptor blocks in dma memory.
115  */
116 int
117 hci1394_compile_ixl(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
118     ixl1394_command_t *ixlp, int *resultp)
119 {
120 	hci1394_comp_ixl_vars_t wv;	/* working variables used throughout */
121 
122 	ASSERT(soft_statep != NULL);
123 	ASSERT(ctxtp != NULL);
124 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_enter,
125 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
126 
127 	/* Initialize compiler working variables */
128 	hci1394_compile_ixl_init(&wv, soft_statep, ctxtp, ixlp);
129 
130 	/*
131 	 * First pass:
132 	 *    Parse ixl commands, building desc blocks, until end of IXL
133 	 *    linked list.
134 	 */
135 	hci1394_parse_ixl(&wv, ixlp);
136 
137 	/*
138 	 * Second pass:
139 	 *    Resolve all generated descriptor block jump and skip addresses.
140 	 *    Set interrupt enable in descriptor blocks which have callback
141 	 *    operations in their execution scope. (Previously store_timestamp
142 	 *    operations were counted also.) Set interrupt enable in descriptor
143 	 *    blocks which were introduced by an ixl label command.
144 	 */
145 	if (wv.dma_bld_error == 0) {
146 		hci1394_finalize_all_xfer_desc(&wv);
147 	}
148 
149 	/* Endup: finalize and cleanup ixl compile, return result */
150 	hci1394_compile_ixl_endup(&wv);
151 
152 	*resultp = wv.dma_bld_error;
153 	if (*resultp != 0) {
154 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_exit,
155 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
156 		return (DDI_FAILURE);
157 	} else {
158 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_exit,
159 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
160 		return (DDI_SUCCESS);
161 	}
162 }
163 
164 /*
165  * hci1394_compile_ixl_init()
166  *    Initialize the isoch context structure associated with the IXL
167  *    program, and initialize the temporary working variables structure.
168  */
169 static void
170 hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
171     hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
172     ixl1394_command_t *ixlp)
173 {
174 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_init_enter,
175 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
176 
177 	/* initialize common recv/xmit compile values */
178 	wvp->soft_statep = soft_statep;
179 	wvp->ctxtp = ctxtp;
180 
181 	/* init/clear ctxtp values */
182 	ctxtp->dma_mem_execp = NULL;
183 	ctxtp->dma_firstp = NULL;
184 	ctxtp->dma_last_time = 0;
185 	ctxtp->xcs_firstp = NULL;
186 	ctxtp->ixl_exec_depth = 0;
187 	ctxtp->ixl_execp = NULL;
188 	ctxtp->ixl_firstp = ixlp;
189 	ctxtp->default_skipxferp = NULL;
190 
191 	/*
192 	 * the context's max_noadv_intrs is set here instead of in isoch init
193 	 * because the default is patchable and would only be picked up this way
194 	 */
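	/*
	 * Illustrative note (an assumption, not in the original source): as
	 * a patchable global, the default would typically be overridden from
	 * /etc/system before the driver loads, e.g.
	 *
	 *    set hci1394:hci1394_ixl_max_noadv_intrs = 16
	 */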
195 	ctxtp->max_noadv_intrs = hci1394_ixl_max_noadv_intrs;
196 
197 	/* init working variables */
198 	wvp->xcs_firstp = NULL;
199 	wvp->xcs_currentp = NULL;
200 
201 	wvp->dma_firstp = NULL;
202 	wvp->dma_currentp = NULL;
203 	wvp->dma_bld_error = 0;
204 
205 	wvp->ixl_io_mode = ctxtp->ctxt_flags;
206 	wvp->ixl_cur_cmdp = NULL;
207 	wvp->ixl_cur_xfer_stp = NULL;
208 	wvp->ixl_cur_labelp = NULL;
209 
210 	wvp->ixl_xfer_st_cnt = 0;	/* count of xfer start commands found */
211 	wvp->xfer_state = XFER_NONE;	/* none, pkt, buf, skip, hdronly */
212 	wvp->xfer_hci_flush = 0;	/* updateable - xfer, jump, set */
213 	wvp->xfer_pktlen = 0;
214 	wvp->xfer_bufcnt = 0;
215 	wvp->descriptors = 0;
216 
217 	/* START RECV ONLY SECTION */
218 	wvp->ixl_setsyncwait_cnt = 0;
219 
220 	/* START XMIT ONLY SECTION */
221 	wvp->ixl_settagsync_cmdp = NULL;
222 	wvp->ixl_setskipmode_cmdp = NULL;
223 	wvp->default_skipmode = ctxtp->default_skipmode; /* nxt,self,stop,jmp */
224 	wvp->default_skiplabelp = ctxtp->default_skiplabelp;
225 	wvp->default_skipxferp = NULL;
226 	wvp->skipmode = ctxtp->default_skipmode;
227 	wvp->skiplabelp = NULL;
228 	wvp->skipxferp = NULL;
229 	wvp->default_tag = ctxtp->default_tag;
230 	wvp->default_sync = ctxtp->default_sync;
231 	wvp->storevalue_bufp = hci1394_alloc_storevalue_dma_mem(wvp);
232 	wvp->storevalue_data = 0;
233 	wvp->xmit_pkthdr1 = 0;
234 	wvp->xmit_pkthdr2 = 0;
235 	/* END XMIT ONLY SECTION */
236 
237 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_init_exit,
238 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
239 }
240 
241 /*
242  * hci1394_compile_ixl_endup()
243  *    This routine is called just before the main hci1394_compile_ixl() exits.
244  *    It checks for errors and performs the appropriate cleanup, or it rolls any
245  *    relevant info from the working variables struct into the context structure
246  */
247 static void
248 hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp)
249 {
250 	ixl1394_command_t *ixl_exec_stp;
251 	hci1394_idma_desc_mem_t *dma_nextp;
252 	int err;
253 
254 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_enter,
255 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
256 
257 	/* error if no descriptor blocks found in ixl & created in dma memory */
258 	if ((wvp->dma_bld_error == 0) && (wvp->ixl_xfer_st_cnt == 0)) {
259 		TNF_PROBE_1(hci1394_compile_ixl_endup_nodata_error,
260 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
261 		    "IXL1394_ENO_DATA_PKTS: prog has no data packets");
262 
263 		wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
264 	}
265 
266 	/* if no errors yet, find the first IXL command that's a transfer cmd */
267 	if (wvp->dma_bld_error == 0) {
268 		err = hci1394_ixl_find_next_exec_xfer(wvp->ctxtp->ixl_firstp,
269 		    NULL, &ixl_exec_stp);
270 
271 		/* error if a label<->jump loop, or no xfer */
272 		if ((err == DDI_FAILURE) || (ixl_exec_stp == NULL)) {
273 			TNF_PROBE_1(hci1394_compile_ixl_endup_error,
274 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
275 			    "IXL1394_ENO_DATA_PKTS: loop or no xfer detected");
276 
277 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
278 		}
279 	}
280 
281 	/* Sync all the DMA descriptor buffers */
282 	dma_nextp = wvp->ctxtp->dma_firstp;
283 	while (dma_nextp != NULL) {
284 		err = ddi_dma_sync(dma_nextp->mem.bi_dma_handle,
285 		    (off_t)dma_nextp->mem.bi_kaddr, dma_nextp->mem.bi_length,
286 		    DDI_DMA_SYNC_FORDEV);
287 		if (err != DDI_SUCCESS) {
288 			wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
289 
290 			TNF_PROBE_1(hci1394_compile_ixl_endup_error,
291 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
292 			    "IXL1394_EINTERNAL_ERROR: dma_sync() failed");
293 			break;
294 		}
295 
296 		/* advance to next dma memory descriptor */
297 		dma_nextp = dma_nextp->dma_nextp;
298 	}
299 
300 	/*
301 	 * If error, cleanup and return: delete all allocated xfer_ctl structs
302 	 * and all dma descriptor page memory and its dma memory blocks too.
303 	 */
304 	if (wvp->dma_bld_error != 0) {
305 		wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
306 		wvp->ctxtp->dma_firstp = wvp->dma_firstp;
307 		hci1394_ixl_cleanup(wvp->soft_statep, wvp->ctxtp);
308 
309 		TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_exit,
310 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
311 		return;
312 	}
313 
314 	/* can only get to here if the first ixl transfer command is found */
315 
316 	/* set required processing vars into ctxtp struct */
317 	wvp->ctxtp->default_skipxferp = wvp->default_skipxferp;
318 	wvp->ctxtp->dma_mem_execp = 0;
319 
320 	/*
321 	 * the transfer command's compiler private xfer_ctl structure has the
322 	 * appropriate bound address
323 	 */
324 	wvp->ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
325 	    ixl_exec_stp->compiler_privatep)->dma[0].dma_bound;
326 	wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
327 	wvp->ctxtp->dma_firstp = wvp->dma_firstp;
328 	wvp->ctxtp->dma_last_time = 0;
329 	wvp->ctxtp->ixl_exec_depth = 0;
330 	wvp->ctxtp->ixl_execp = NULL;
331 
332 	/* compile done */
333 	TNF_PROBE_0_DEBUG(hci1394_compile_ixl_endup_exit,
334 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
335 }
336 
337 /*
338  * hci1394_parse_ixl()
339  *    Scan IXL program and build ohci DMA descriptor blocks in dma memory.
340  *
341  *    Parse/process succeeding ixl commands until end of IXL linked list is
342  *    reached. Evaluate ixl syntax and build (xmit or recv) descriptor
343  *    blocks.  To aid execution time evaluation of current location, enable
344  *    status recording on each descriptor block built.
345  *    On xmit, set sync & tag bits. On recv, optionally set wait for sync bit.
346  */
347 static void
348 hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp, ixl1394_command_t *ixlp)
349 {
350 	ixl1394_command_t *ixlnextp = ixlp;	/* addr of next ixl cmd */
351 	ixl1394_command_t *ixlcurp = NULL;	/* addr of current ixl cmd */
352 	uint16_t ixlopcode = 0;			/* opcode of current ixl cmd */
353 
354 	uint32_t pktsize;
355 	uint32_t pktcnt;
356 
357 	TNF_PROBE_0_DEBUG(hci1394_parse_ixl_enter, HCI1394_TNF_HAL_STACK_ISOCH,
358 	    "");
359 
360 	/* follow ixl links until reach end or find error */
361 	while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
362 
363 		/* set this command as the current ixl command */
364 		wvp->ixl_cur_cmdp = ixlcurp = ixlnextp;
365 		ixlnextp = ixlcurp->next_ixlp;
366 
367 		ixlopcode = ixlcurp->ixl_opcode;
368 
369 		/* init compiler controlled values in current ixl command */
370 		ixlcurp->compiler_privatep = NULL;
371 		ixlcurp->compiler_resv = 0;
372 
373 		/* error if xmit/recv mode not appropriate for current cmd */
374 		if ((((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) &&
375 			((ixlopcode & IXL1394_OPF_ONRECV) == 0)) ||
376 		    (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
377 			((ixlopcode & IXL1394_OPF_ONXMIT) == 0))) {
378 
379 			/* check if command op failed because it was invalid */
380 			if (hci1394_is_opcode_valid(ixlopcode) != B_TRUE) {
381 				TNF_PROBE_3(hci1394_parse_ixl_bad_opcode_error,
382 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
383 				    errmsg, "IXL1394_EBAD_IXL_OPCODE",
384 				    tnf_opaque, ixl_commandp, ixlcurp,
385 				    tnf_opaque, ixl_opcode, ixlopcode);
386 
387 				wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
388 			} else {
389 				TNF_PROBE_3(hci1394_parse_ixl_mode_error,
390 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
391 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
392 				    "invalid ixlop in mode", tnf_uint, io_mode,
393 				    wvp->ixl_io_mode, tnf_opaque, ixl_opcode,
394 				    ixlopcode);
395 
396 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
397 			}
398 			continue;
399 		}
400 
401 		/*
402 		 * if ends xfer flag set, finalize current xfer descriptor
403 		 * block build
404 		 */
405 		if ((ixlopcode & IXL1394_OPF_ENDSXFER) != 0) {
406 			/* finalize any descriptor block build in progress */
407 			hci1394_finalize_cur_xfer_desc(wvp);
408 
409 			if (wvp->dma_bld_error != 0) {
410 				continue;
411 			}
412 		}
413 
414 		/*
415 		 * now process based on specific opcode value
416 		 */
417 		switch (ixlopcode) {
418 
419 		case IXL1394_OP_RECV_BUF:
420 		case IXL1394_OP_RECV_BUF_U: {
421 			ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
422 
423 			cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
424 
425 			/*
426 			 * In packet-per-buffer mode:
427 			 *    This ixl command builds a collection of xfer
428 			 *    descriptor blocks (size/pkt_size of them) each to
429 			 *    recv a packet whose buffer size is pkt_size and
430 			 *    whose buffer ptr is (pktcur*pkt_size + bufp)
431 			 *
432 			 * In buffer fill mode:
433 			 *    This ixl command builds a single xfer descriptor
434 			 *    block to recv as many packets or parts of packets
435 			 *    as can fit into the buffer size specified
436 			 *    (pkt_size is not used).
437 			 */
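			/*
			 * Worked example (illustrative numbers,
			 * packet-per-buffer mode): with size = 4000 and
			 * pkt_size = 500, the check below yields pktcnt = 8,
			 * so eight descriptor blocks are built, receiving
			 * into bufp + 0, bufp + 500, ..., bufp + 3500.
			 * size = 4000 with pkt_size = 600 fails the ratio
			 * check (6 * 600 != 4000) and sets
			 * IXL1394_EPKTSIZE_RATIO.
			 */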
438 
439 			/* set xfer_state for new descriptor block build */
440 			wvp->xfer_state = XFER_BUF;
441 
442 			/* set this ixl command as current xferstart command */
443 			wvp->ixl_cur_xfer_stp = ixlcurp;
444 
445 			/*
446 			 * perform packet-per-buffer checks
447 			 * (no checks needed when in buffer fill mode)
448 			 */
449 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) == 0) {
450 
451 				/* the packets must use the buffer exactly */
452 				pktsize = cur_xfer_buf_ixlp->pkt_size;
453 				pktcnt = 0;
454 				if (pktsize != 0) {
455 					pktcnt = cur_xfer_buf_ixlp->size /
456 					    pktsize;
457 				}
458 				if ((pktcnt == 0) || ((pktsize * pktcnt) !=
459 				    cur_xfer_buf_ixlp->size)) {
460 
461 					TNF_PROBE_3(hci1394_parse_ixl_rat_error,
462 					    HCI1394_TNF_HAL_ERROR_ISOCH, "",
463 					    tnf_string, errmsg,
464 					    "IXL1394_EPKTSIZE_RATIO", tnf_int,
465 					    buf_size, cur_xfer_buf_ixlp->size,
466 					    tnf_int, pkt_size, pktsize);
467 
468 					wvp->dma_bld_error =
469 					    IXL1394_EPKTSIZE_RATIO;
470 					continue;
471 				}
472 			}
473 
474 			/*
475 			 * set buffer pointer & size into first xfer_bufp
476 			 * and xfer_size
477 			 */
478 			if (hci1394_set_next_xfer_buf(wvp,
479 			    cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
480 			    cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
481 
482 				/* wvp->dma_bld_error is set by above call */
483 				continue;
484 			}
485 			break;
486 		}
487 
488 		case IXL1394_OP_RECV_PKT_ST:
489 		case IXL1394_OP_RECV_PKT_ST_U: {
490 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
491 
492 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
493 
494 			/* error if in buffer fill mode */
495 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
496 				TNF_PROBE_1(hci1394_parse_ixl_mode_error,
497 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
498 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
499 				    "RECV_PKT_ST used in BFFILL mode");
500 
501 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
502 				continue;
503 			}
504 
505 			/* set xfer_state for new descriptor block build */
506 			/* set this ixl command as current xferstart command */
507 			wvp->xfer_state = XFER_PKT;
508 			wvp->ixl_cur_xfer_stp = ixlcurp;
509 
510 			/*
511 			 * set buffer pointer & size into first xfer_bufp
512 			 * and xfer_size
513 			 */
514 			if (hci1394_set_next_xfer_buf(wvp,
515 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
516 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
517 
518 				/* wvp->dma_bld_error is set by above call */
519 				continue;
520 			}
521 			break;
522 		}
523 
524 		case IXL1394_OP_RECV_PKT:
525 		case IXL1394_OP_RECV_PKT_U: {
526 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
527 
528 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
529 
530 			/* error if in buffer fill mode */
531 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
532 				TNF_PROBE_1(hci1394_parse_ixl_mode_error,
533 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
534 				    errmsg, "IXL1394_EWRONG_XR_CMD_MODE: "
535 				    "RECV_PKT used in BFFILL mode");
536 
537 				wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
538 				continue;
539 			}
540 
541 			/* error if xfer_state not xfer pkt */
542 			if (wvp->xfer_state != XFER_PKT) {
543 				TNF_PROBE_1(hci1394_parse_ixl_misplacercv_error,
544 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
545 				    errmsg, "IXL1394_EMISPLACED_RECV: "
546 				    "RECV_PKT without RECV_PKT_ST");
547 
548 				wvp->dma_bld_error = IXL1394_EMISPLACED_RECV;
549 				continue;
550 			}
551 
552 			/*
553 			 * save xfer start cmd ixl ptr in compiler_privatep
554 			 * field of this cmd
555 			 */
556 			ixlcurp->compiler_privatep = (void *)
557 			    wvp->ixl_cur_xfer_stp;
558 
559 			/*
560 			 * save pkt index [1-n] in compiler_resv field of
561 			 * this cmd
562 			 */
563 			ixlcurp->compiler_resv = wvp->xfer_bufcnt;
564 
565 			/*
566 			 * set buffer pointer & size into next xfer_bufp
567 			 * and xfer_size
568 			 */
569 			if (hci1394_set_next_xfer_buf(wvp,
570 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
571 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
572 
573 				/* wvp->dma_bld_error is set by above call */
574 				continue;
575 			}
576 
577 			/*
578 			 * set updateable xfer cache flush eval flag if
579 			 * updateable opcode
580 			 */
581 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
582 				wvp->xfer_hci_flush |= UPDATEABLE_XFER;
583 			}
584 			break;
585 		}
586 
587 		case IXL1394_OP_SEND_BUF:
588 		case IXL1394_OP_SEND_BUF_U: {
589 			ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
590 
591 			cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
592 
593 			/*
594 			 * These send_buf commands build a collection of xmit
595 			 * descriptor blocks (size/pkt_size of them) each to
596 			 * xfer a packet whose buffer size is pkt_size and whose
597 			 * buffer ptr is (pktcur*pkt_size + bufp). (ptr and size
598 			 * are adjusted if they have header form of ixl cmd)
599 			 */
600 
601 			/* set xfer_state for new descriptor block build */
602 			wvp->xfer_state = XFER_BUF;
603 
604 			/* set this ixl command as current xferstart command */
605 			wvp->ixl_cur_xfer_stp = ixlcurp;
606 
607 			/* the packets must use the buffer exactly, else error */
608 			pktsize = cur_xfer_buf_ixlp->pkt_size;
609 			pktcnt = 0;
610 			if (pktsize != 0) {
611 				pktcnt = cur_xfer_buf_ixlp->size / pktsize;
612 			}
613 			if ((pktcnt == 0) || ((pktsize * pktcnt) !=
614 			    cur_xfer_buf_ixlp->size)) {
615 
616 				TNF_PROBE_3(hci1394_parse_ixl_rat_error,
617 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
618 				    errmsg, "IXL1394_EPKTSIZE_RATIO", tnf_int,
619 				    buf_size, cur_xfer_buf_ixlp->size, tnf_int,
620 				    pkt_size, pktsize);
621 
622 				wvp->dma_bld_error = IXL1394_EPKTSIZE_RATIO;
623 				continue;
624 			}
625 
626 			/* set buf ptr & size into 1st xfer_bufp & xfer_size */
627 			if (hci1394_set_next_xfer_buf(wvp,
628 			    cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
629 			    cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
630 
631 				/* wvp->dma_bld_error is set by above call */
632 				continue;
633 			}
634 			break;
635 		}
636 
637 		case IXL1394_OP_SEND_PKT_ST:
638 		case IXL1394_OP_SEND_PKT_ST_U: {
639 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
640 
641 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
642 
643 			/* set xfer_state for new descriptor block build */
644 			/* set this ixl command as current xferstart command */
645 			wvp->xfer_state = XFER_PKT;
646 			wvp->ixl_cur_xfer_stp = ixlcurp;
647 
648 			/*
649 			 * set buffer pointer & size into first xfer_bufp and
650 			 * xfer_size
651 			 */
652 			if (hci1394_set_next_xfer_buf(wvp,
653 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
654 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
655 
656 				/* wvp->dma_bld_error is set by above call */
657 				continue;
658 			}
659 			break;
660 		}
661 
662 		case IXL1394_OP_SEND_PKT_WHDR_ST:
663 		case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
664 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
665 
666 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
667 
668 			/* set xfer_state for new descriptor block build */
669 			/* set this ixl command as current xferstart command */
670 			wvp->xfer_state = XFER_PKT;
671 			wvp->ixl_cur_xfer_stp = ixlcurp;
672 
673 			/*
674 			 * buffer size must be at least 4 (must include header),
675 			 * else error
676 			 */
677 			if (cur_xfer_pkt_ixlp->size < 4) {
678 				TNF_PROBE_2(hci1394_parse_ixl_hdr_missing_error,
679 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
680 				    errmsg, "IXL1394_EPKT_HDR_MISSING", tnf_int,
681 				    pkt_size, cur_xfer_pkt_ixlp->size);
682 
683 				wvp->dma_bld_error = IXL1394_EPKT_HDR_MISSING;
684 				continue;
685 			}
686 
687 			/*
688 			 * set buffer and size(excluding header) into first
689 			 * xfer_bufp and xfer_size
690 			 */
691 			if (hci1394_set_next_xfer_buf(wvp,
692 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr + 4,
693 			    cur_xfer_pkt_ixlp->size - 4) != DDI_SUCCESS) {
694 
695 				/* wvp->dma_bld_error is set by above call */
696 				continue;
697 			}
698 			break;
699 		}
700 
701 		case IXL1394_OP_SEND_PKT:
702 		case IXL1394_OP_SEND_PKT_U: {
703 			ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
704 
705 			cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
706 
707 			/* error if xfer_state not xfer pkt */
708 			if (wvp->xfer_state != XFER_PKT) {
709 				TNF_PROBE_1(hci1394_parse_ixl_misplacesnd_error,
710 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
711 				    errmsg, "IXL1394_EMISPLACED_SEND: SEND_PKT "
712 				    "without SEND_PKT_ST");
713 
714 				wvp->dma_bld_error = IXL1394_EMISPLACED_SEND;
715 				continue;
716 			}
717 
718 			/*
719 			 * save xfer start cmd ixl ptr in compiler_privatep
720 			 * field of this cmd
721 			 */
722 			ixlcurp->compiler_privatep = (void *)
723 			    wvp->ixl_cur_xfer_stp;
724 
725 			/*
726 			 * save pkt index [1-n] in compiler_resv field of this
727 			 * cmd
728 			 */
729 			ixlcurp->compiler_resv = wvp->xfer_bufcnt;
730 
731 			/*
732 			 * set buffer pointer & size into next xfer_bufp
733 			 * and xfer_size
734 			 */
735 			if (hci1394_set_next_xfer_buf(wvp,
736 			    cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
737 			    cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
738 
739 				/* wvp->dma_bld_error is set by above call */
740 				continue;
741 			}
742 
743 			/*
744 			 * set updateable xfer cache flush eval flag if
745 			 * updateable opcode
746 			 */
747 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
748 				wvp->xfer_hci_flush |= UPDATEABLE_XFER;
749 			}
750 			break;
751 		}
752 
753 		case IXL1394_OP_SEND_HDR_ONLY:
754 			/* set xfer_state for new descriptor block build */
755 			wvp->xfer_state = XMIT_HDRONLY;
756 
757 			/* set this ixl command as current xferstart command */
758 			wvp->ixl_cur_xfer_stp = ixlcurp;
759 			break;
760 
761 		case IXL1394_OP_SEND_NO_PKT:
762 			/* set xfer_state for new descriptor block build */
763 			wvp->xfer_state = XMIT_NOPKT;
764 
765 			/* set this ixl command as current xferstart command */
766 			wvp->ixl_cur_xfer_stp = ixlcurp;
767 			break;
768 
769 		case IXL1394_OP_JUMP:
770 		case IXL1394_OP_JUMP_U: {
771 			ixl1394_jump_t *cur_jump_ixlp;
772 
773 			cur_jump_ixlp = (ixl1394_jump_t *)ixlcurp;
774 
775 			/*
776 			 * verify label indicated by IXL1394_OP_JUMP is
777 			 * actually an IXL1394_OP_LABEL or NULL
778 			 */
779 			if ((cur_jump_ixlp->label != NULL) &&
780 			    (cur_jump_ixlp->label->ixl_opcode !=
781 			    IXL1394_OP_LABEL)) {
782 				TNF_PROBE_3(hci1394_parse_ixl_jumplabel_error,
783 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
784 				    errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
785 				    tnf_opaque, jumpixl_commandp, ixlcurp,
786 				    tnf_opaque, jumpto_ixl,
787 				    cur_jump_ixlp->label);
788 
789 				wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
790 				continue;
791 			}
792 			break;
793 		}
794 
795 		case IXL1394_OP_LABEL:
796 			/*
797 			 * save current ixl label command for xfer cmd
798 			 * finalize processing
799 			 */
800 			wvp->ixl_cur_labelp = ixlcurp;
801 
802 			/* set initiating label flag to cause cache flush */
803 			wvp->xfer_hci_flush |= INITIATING_LBL;
804 			break;
805 
806 		case IXL1394_OP_CALLBACK:
807 		case IXL1394_OP_CALLBACK_U:
808 		case IXL1394_OP_STORE_TIMESTAMP:
809 			/*
810 			 * these commands are accepted during compile,
811 			 * processed during execution (interrupt handling).
812 			 * No further processing is needed here.
813 			 */
814 			break;
815 
816 		case IXL1394_OP_SET_SKIPMODE:
817 		case IXL1394_OP_SET_SKIPMODE_U:
818 			/*
819 			 * Error if already have a set skipmode cmd for
820 			 * this xfer
821 			 */
822 			if (wvp->ixl_setskipmode_cmdp != NULL) {
823 				TNF_PROBE_2(hci1394_parse_ixl_dup_set_error,
824 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
825 				    errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
826 				    " duplicate set skipmode", tnf_opaque,
827 				    ixl_commandp, ixlcurp);
828 
829 				wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
830 				continue;
831 			}
832 
833 			/* save skip mode ixl command and verify skipmode */
834 			wvp->ixl_setskipmode_cmdp = (ixl1394_set_skipmode_t *)
835 			    ixlcurp;
836 
837 			if ((wvp->ixl_setskipmode_cmdp->skipmode !=
838 				IXL1394_SKIP_TO_NEXT) &&
839 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
840 				IXL1394_SKIP_TO_SELF) &&
841 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
842 				IXL1394_SKIP_TO_STOP) &&
843 			    (wvp->ixl_setskipmode_cmdp->skipmode !=
844 				IXL1394_SKIP_TO_LABEL)) {
845 
846 				TNF_PROBE_3(hci1394_parse_ixl_dup_set_error,
847 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
848 				    errmsg, "IXL1394_EBAD_SKIPMODE", tnf_opaque,
849 				    ixl_commandp, ixlcurp, tnf_int, skip,
850 				    wvp->ixl_setskipmode_cmdp->skipmode);
851 
852 				wvp->dma_bld_error = IXL1394_EBAD_SKIPMODE;
853 				continue;
854 			}
855 
856 			/*
857 			 * if mode is IXL1394_SKIP_TO_LABEL, verify label
858 			 * references an IXL1394_OP_LABEL
859 			 */
860 			if ((wvp->ixl_setskipmode_cmdp->skipmode ==
861 				IXL1394_SKIP_TO_LABEL) &&
862 			    ((wvp->ixl_setskipmode_cmdp->label == NULL) ||
863 			    (wvp->ixl_setskipmode_cmdp->label->ixl_opcode !=
864 				IXL1394_OP_LABEL))) {
865 
866 				TNF_PROBE_3(hci1394_parse_ixl_jump_error,
867 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
868 				    errmsg, "IXL1394_EJUMP_NOT_TO_LABEL",
869 				    tnf_opaque, jumpixl_commandp, ixlcurp,
870 				    tnf_opaque, jumpto_ixl,
871 				    wvp->ixl_setskipmode_cmdp->label);
872 
873 				wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
874 				continue;
875 			}
876 			/*
877 			 * set updateable set cmd cache flush eval flag if
878 			 * updateable opcode
879 			 */
880 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
881 				wvp->xfer_hci_flush |= UPDATEABLE_SET;
882 			}
883 			break;
884 
885 		case IXL1394_OP_SET_TAGSYNC:
886 		case IXL1394_OP_SET_TAGSYNC_U:
887 			/*
888 			 * error if we already have a set tag and sync cmd
889 			 * for this xfer
890 			 */
891 			if (wvp->ixl_settagsync_cmdp != NULL) {
892 				TNF_PROBE_2(hci1394_parse_ixl_dup_set_error,
893 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
894 				    errmsg, "IXL1394_EDUPLICATE_SET_CMD:"
895 				    " duplicate set tagsync", tnf_opaque,
896 				    ixl_commandp, ixlcurp);
897 
898 				wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
899 				continue;
900 			}
901 
902 			/* save ixl command containing tag and sync values */
903 			wvp->ixl_settagsync_cmdp =
904 			    (ixl1394_set_tagsync_t *)ixlcurp;
905 
906 			/*
907 			 * set updateable set cmd cache flush eval flag if
908 			 * updateable opcode
909 			 */
910 			if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
911 				wvp->xfer_hci_flush |= UPDATEABLE_SET;
912 			}
913 			break;
914 
915 		case IXL1394_OP_SET_SYNCWAIT:
916 			/*
917 			 * count ixl wait-for-sync commands since the last
918 			 * finalize; ignore multiple occurrences for the same
919 			 * xfer command
920 			 */
921 			wvp->ixl_setsyncwait_cnt++;
922 			break;
923 
924 		default:
925 			/* error - unknown/unimplemented ixl command */
926 			TNF_PROBE_3(hci1394_parse_ixl_bad_opcode_error,
927 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
928 			    "IXL1394_EBAD_IXL_OPCODE", tnf_opaque, ixl_commandp,
929 			    ixlcurp, tnf_opaque, ixl_opcode, ixlopcode);
930 
931 			wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
932 			continue;
933 		}
934 	} /* while */
935 
936 	/* finalize any last descriptor block build */
937 	wvp->ixl_cur_cmdp = NULL;
938 	if (wvp->dma_bld_error == 0) {
939 		hci1394_finalize_cur_xfer_desc(wvp);
940 	}
941 
942 	TNF_PROBE_0_DEBUG(hci1394_parse_ixl_exit,
943 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
944 }
945 
946 /*
947  * hci1394_finalize_all_xfer_desc()
948  *    Pass 2: Scan IXL resolving all dma descriptor jump and skip addresses.
949  *
950  *    Set interrupt enable on first descriptor block associated with current
951  *    xfer IXL command if current IXL xfer was introduced by an IXL label cmd.
952  *
953  *    Set interrupt enable on last descriptor block associated with current xfer
954  *    IXL command if any callback ixl commands are found on the execution path
955  *    between the current and the next xfer ixl command.  (Previously, this
956  *    applied to store timestamp ixl commands, as well.)
957  */
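/*
 * Illustrative sketch (not from the original source): after this pass, a
 * composite xfer of N descriptor blocks is forward-linked as
 *
 *    dma[0].branch -> dma[1] -> ... -> dma[N-1].branch -> next exec xfer
 *
 * while each block's skip address points at a label's first xfer, the next
 * block, itself, or 0 (stop), according to the governing set skipmode cmd.
 */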
958 static void
959 hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
960 {
961 	ixl1394_command_t *ixlcurp;		/* current ixl command */
962 	ixl1394_command_t *ixlnextp;		/* next ixl command */
963 	ixl1394_command_t *ixlexecnext;
964 	hci1394_xfer_ctl_t	*xferctl_curp;
965 	hci1394_xfer_ctl_t	*xferctl_nxtp;
966 	hci1394_desc_t		*hcidescp;
967 	ddi_acc_handle_t	acc_hdl;
968 	uint32_t	temp;
969 	uint32_t	dma_execnext_addr;
970 	uint32_t	dma_skiplabel_addr;
971 	uint32_t	dma_skip_addr;
972 	uint32_t	callback_cnt;
973 	uint16_t	repcnt;
974 	uint16_t	ixlopcode;
975 	int		ii;
976 	int		err;
977 
978 	TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_enter,
979 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
980 
981 	/*
982 	 * If xmit mode and if default skipmode is skip to label -
983 	 * follow exec path starting at default skipmode label until
984 	 * find the first ixl xfer command which is to be executed.
985 	 * Set its address into default_skipxferp.
986 	 */
987 	if (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
988 	    (wvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_LABEL)) {
989 
990 		err = hci1394_ixl_find_next_exec_xfer(wvp->default_skiplabelp,
991 		    NULL, &wvp->default_skipxferp);
992 		if (err == DDI_FAILURE) {
993 			TNF_PROBE_2(hci1394_finalize_all_xfer_desc_error,
994 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
995 			    "IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
996 			    "for skiplabel default w/no xfers", tnf_opaque,
997 			    skipixl_cmdp, wvp->default_skiplabelp);
998 			TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_exit,
999 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1000 
1001 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
1002 			return;
1003 		}
1004 	}
1005 
1006 	/* set first ixl cmd */
1007 	ixlnextp = wvp->ctxtp->ixl_firstp;
1008 
1009 	/* follow ixl links until reach end or find error */
1010 	while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
1011 
1012 		/* set this command as the current ixl command */
1013 		ixlcurp = ixlnextp;
1014 		ixlnextp = ixlcurp->next_ixlp;
1015 
1016 		/* get command opcode removing unneeded update flag */
1017 		ixlopcode = ixlcurp->ixl_opcode & ~IXL1394_OPF_UPDATE;
1018 
1019 		/*
1020 		 * Scan for next ixl xfer start command (including this one),
1021 		 * along ixl link path. Once xfer command found, find next IXL
1022 		 * xfer cmd along execution path and fill in branch address of
1023 		 * current xfer command. If it is a composite ixl xfer command,
1024 		 * first link forward the branch dma addresses of each
1025 		 * descriptor block in the composite; on reaching the final
1026 		 * one, set its branch address to the next execution path xfer
1027 		 * found.  Next, determine skip mode and fill in skip address(es).
1028 		 */
1029 		/* skip to next if not xfer start ixl command */
1030 		if (((ixlopcode & IXL1394_OPF_ISXFER) == 0) ||
1031 		    ((ixlopcode & IXL1394_OPTY_MASK) == 0)) {
1032 			continue;
1033 		}
1034 
1035 		/*
1036 		 * get xfer_ctl structure and composite repeat count for current
1037 		 * IXL xfer cmd
1038 		 */
1039 		xferctl_curp = (hci1394_xfer_ctl_t *)ixlcurp->compiler_privatep;
1040 		repcnt = xferctl_curp->cnt;
1041 
1042 		/*
1043 		 * if initiated by an IXL label command, set interrupt enable
1044 		 * flag into last component of first descriptor block of
1045 		 * current IXL xfer cmd
1046 		 */
1047 		if ((xferctl_curp->ctl_flags & XCTL_LABELLED) != 0) {
1048 			hcidescp = (hci1394_desc_t *)
1049 			    xferctl_curp->dma[0].dma_descp;
1050 			acc_hdl = xferctl_curp->dma[0].dma_buf->bi_handle;
1051 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1052 			temp |= DESC_INTR_ENBL;
1053 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1054 		}
1055 
1056 		/* find next xfer IXL cmd by following execution path */
1057 		err = hci1394_ixl_find_next_exec_xfer(ixlcurp->next_ixlp,
1058 		    &callback_cnt, &ixlexecnext);
1059 
1060 		/* if label<->jump loop detected, return error */
1061 		if (err == DDI_FAILURE) {
1062 			wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
1063 
1064 			TNF_PROBE_2(hci1394_finalize_all_xfer_desc_error,
1065 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1066 			    "IXL1394_ENO_DATA_PKTS: label<->jump loop detected "
1067 			    "w/no xfers", tnf_opaque, ixl_cmdp,
1068 			    ixlcurp->next_ixlp);
1069 			continue;
1070 		}
1071 
1072 		/* link current IXL's xfer_ctl to next xfer IXL on exec path */
1073 		xferctl_curp->execp = ixlexecnext;
1074 
1075 		/*
1076 		 * if callbacks have been seen during execution path scan,
1077 		 * set interrupt enable flag into last descriptor of last
1078 		 * descriptor block of current IXL xfer cmd
1079 		 */
1080 		if (callback_cnt != 0) {
1081 			hcidescp = (hci1394_desc_t *)
1082 			    xferctl_curp->dma[repcnt - 1].dma_descp;
1083 			acc_hdl =
1084 			    xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1085 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1086 			temp |= DESC_INTR_ENBL;
1087 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1088 		}
1089 
1090 		/*
1091 		 * obtain dma bound addr of next exec path IXL xfer command,
1092 		 * if any
1093 		 */
1094 		dma_execnext_addr = 0;
1095 
1096 		if (ixlexecnext != NULL) {
1097 			xferctl_nxtp = (hci1394_xfer_ctl_t *)
1098 			    ixlexecnext->compiler_privatep;
1099 			dma_execnext_addr = xferctl_nxtp->dma[0].dma_bound;
1100 		} else {
1101 			/*
1102 			 * If this is last descriptor (next == NULL), then
1103 			 * make sure the interrupt bit is enabled.  This
1104 			 * way we can ensure that we are notified when the
1105 			 * descriptor chain processing has come to an end.
1106 			 */
1107 			hcidescp = (hci1394_desc_t *)
1108 			    xferctl_curp->dma[repcnt - 1].dma_descp;
1109 			acc_hdl =
1110 			    xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1111 			temp = ddi_get32(acc_hdl, &hcidescp->hdr);
1112 			temp |= DESC_INTR_ENBL;
1113 			ddi_put32(acc_hdl, &hcidescp->hdr, temp);
1114 		}
1115 
1116 		/*
1117 		 * set jump address of final cur IXL xfer cmd to addr next
1118 		 * IXL xfer cmd
1119 		 */
1120 		hcidescp = (hci1394_desc_t *)
1121 		    xferctl_curp->dma[repcnt - 1].dma_descp;
1122 		acc_hdl = xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
1123 		ddi_put32(acc_hdl, &hcidescp->branch, dma_execnext_addr);
1124 
1125 		/*
1126 		 * if a composite object, forward link initial jump
1127 		 * dma addresses
1128 		 */
1129 		for (ii = 0; ii < repcnt - 1; ii++) {
1130 			hcidescp = (hci1394_desc_t *)
1131 			    xferctl_curp->dma[ii].dma_descp;
1132 			acc_hdl	 = xferctl_curp->dma[ii].dma_buf->bi_handle;
1133 			ddi_put32(acc_hdl, &hcidescp->branch,
1134 			    xferctl_curp->dma[ii + 1].dma_bound);
1135 		}
1136 
1137 		/*
1138 		 * fill in skip address(es) for all descriptor blocks belonging
1139 		 * to current IXL xfer command; note: skip addresses apply only
1140 		 * to xmit mode commands
1141 		 */
1142 		if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
1143 
1144 			/* first obtain and set skip mode information */
1145 			wvp->ixl_setskipmode_cmdp = xferctl_curp->skipmodep;
1146 			hci1394_set_xmit_skip_mode(wvp);
1147 
1148 			/*
1149 			 * if skip to label, init dma bound addr to be
1150 			 * 1st xfer cmd after label
1151 			 */
1152 			dma_skiplabel_addr = 0;
1153 			if ((wvp->skipmode == IXL1394_SKIP_TO_LABEL) &&
1154 			    (wvp->skipxferp != NULL)) {
1155 				xferctl_nxtp = (hci1394_xfer_ctl_t *)
1156 				    wvp->skipxferp->compiler_privatep;
1157 				dma_skiplabel_addr =
1158 				    xferctl_nxtp->dma[0].dma_bound;
1159 			}
1160 
1161 			/*
1162 			 * set skip addrs for each descriptor blk at this
1163 			 * xfer start IXL cmd
1164 			 */
1165 			for (ii = 0; ii < repcnt; ii++) {
1166 				switch (wvp->skipmode) {
1167 
1168 				case IXL1394_SKIP_TO_LABEL:
1169 					/* set dma bound address - label */
1170 					dma_skip_addr = dma_skiplabel_addr;
1171 					break;
1172 
1173 				case IXL1394_SKIP_TO_NEXT:
1174 					/* set dma bound address - next */
1175 					if (ii < repcnt - 1) {
1176 						dma_skip_addr = xferctl_curp->
1177 						    dma[ii + 1].dma_bound;
1178 					} else {
1179 						dma_skip_addr =
1180 						    dma_execnext_addr;
1181 					}
1182 					break;
1183 
1184 				case IXL1394_SKIP_TO_SELF:
1185 					/* set dma bound address - self */
1186 					dma_skip_addr =
1187 					    xferctl_curp->dma[ii].dma_bound;
1188 					break;
1189 
1190 				case IXL1394_SKIP_TO_STOP:
1191 				default:
1192 					/* set dma bound address - stop */
1193 					dma_skip_addr = 0;
1194 					break;
1195 				}
1196 
1197 				/*
1198 				 * determine address of first descriptor of
1199 				 * current descriptor block by adjusting addr of
1200 				 * last descriptor of current descriptor block
1201 				 */
1202 				hcidescp = ((hci1394_desc_t *)
1203 				    xferctl_curp->dma[ii].dma_descp);
1204 				acc_hdl =
1205 				    xferctl_curp->dma[ii].dma_buf->bi_handle;
1206 
1207 				/*
1208 				 * adjust by count of descriptors in this desc
1209 				 * block not including the last one (size of
1210 				 * descriptor)
1211 				 */
1212 				hcidescp -= ((xferctl_curp->dma[ii].dma_bound &
1213 				    DESC_Z_MASK) - 1);
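				/*
				 * e.g. (illustrative): a block whose bound
				 * address encodes Z = 4 steps back three
				 * hci1394_desc_t entries, from its last
				 * descriptor to its first.
				 */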
1214 
1215 				/*
1216 				 * adjust further if the last descriptor is
1217 				 * double sized
1218 				 */
1219 				if (ixlopcode == IXL1394_OP_SEND_HDR_ONLY) {
1220 					hcidescp++;
1221 				}
1222 				/*
1223 				 * now set skip address into first descriptor
1224 				 * of descriptor block
1225 				 */
1226 				ddi_put32(acc_hdl, &hcidescp->branch,
1227 				    dma_skip_addr);
1228 			} /* for */
1229 		} /* if */
1230 	} /* while */
1231 
1232 	TNF_PROBE_0_DEBUG(hci1394_finalize_all_xfer_desc_exit,
1233 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1234 }
1235 
1236 /*
1237  * hci1394_finalize_cur_xfer_desc()
1238  *    Build the openHCI descriptor for a packet or buffer based on info
1239  *    currently collected into the working vars struct (wvp).  After some
1240  *    checks, this routine dispatches to the appropriate descriptor block
1241  *    build (bld) routine for the packet or buf type.
1242  */
1243 static void
1244 hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
1245 {
1246 	uint16_t ixlopcode;
1247 	uint16_t ixlopraw;
1248 
1249 	TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_enter,
1250 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1251 
1252 	/* extract opcode from current IXL cmd (if any) */
1253 	if (wvp->ixl_cur_cmdp != NULL) {
1254 		ixlopcode = wvp->ixl_cur_cmdp->ixl_opcode;
1255 		ixlopraw = ixlopcode & ~IXL1394_OPF_UPDATE;
1256 	} else {
1257 		ixlopcode = ixlopraw = IXL1394_OP_INVALID;
1258 	}
1259 
1260 	/*
1261 	 * if no xfer descriptor block being built, perform validity checks
1262 	 */
1263 	if (wvp->xfer_state == XFER_NONE) {
1264 		/*
1265 		 * error if being finalized by IXL1394_OP_LABEL or
1266 		 * IXL1394_OP_JUMP or if at end, and have an unapplied
1267 		 * IXL1394_OP_SET_TAGSYNC, IXL1394_OP_SET_SKIPMODE or
1268 		 * IXL1394_OP_SET_SYNCWAIT
1269 		 */
1270 		if ((ixlopraw == IXL1394_OP_JUMP) ||
1271 		    (ixlopraw == IXL1394_OP_LABEL) ||
1272 		    (wvp->ixl_cur_cmdp == NULL) ||
1273 		    (wvp->ixl_cur_cmdp->next_ixlp == NULL)) {
1274 			if ((wvp->ixl_settagsync_cmdp != NULL) ||
1275 			    (wvp->ixl_setskipmode_cmdp != NULL) ||
1276 			    (wvp->ixl_setsyncwait_cnt != 0)) {
1277 
1278 				wvp->dma_bld_error = IXL1394_EUNAPPLIED_SET_CMD;
1279 
1280 				TNF_PROBE_2(
1281 				    hci1394_finalize_cur_xfer_desc_set_error,
1282 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
1283 				    errmsg, "IXL1394_EUNAPPLIED_SET_CMD: "
1284 				    "orphaned set (no associated packet)",
1285 				    tnf_opaque, ixl_commandp,
1286 				    wvp->ixl_cur_cmdp);
1287 				TNF_PROBE_0_DEBUG(
1288 				    hci1394_finalize_cur_xfer_desc_exit,
1289 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1290 				return;
1291 			}
1292 		}
1293 
1294 		/* error if finalize is due to updateable jump cmd */
1295 		if (ixlopcode == IXL1394_OP_JUMP_U) {
1296 
1297 			wvp->dma_bld_error = IXL1394_EUPDATE_DISALLOWED;
1298 
1299 			TNF_PROBE_2(hci1394_finalize_cur_xfer_desc_upd_error,
1300 			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1301 			    "IXL1394_EUPDATE_DISALLOWED: jumpU w/out pkt",
1302 			    tnf_opaque, ixl_commandp, wvp->ixl_cur_cmdp);
1303 			TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1304 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1305 			return;
1306 		}
1307 
1308 		TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1309 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1310 
1311 		/* no error, no xfer */
1312 		return;
1313 	}
1314 
1315 	/*
1316 	 * finalize current xfer descriptor block being built
1317 	 */
1318 
1319 	/* count IXL xfer start command for descriptor block being built */
1320 	wvp->ixl_xfer_st_cnt++;
1321 
1322 	/*
1323 	 * complete setting of cache flush evaluation flags; flags will already
1324 	 * have been set by updateable set cmds and non-start xfer pkt cmds
1325 	 */
1326 	/* now set cache flush flag if current xfer start cmnd is updateable */
1327 	if ((wvp->ixl_cur_xfer_stp->ixl_opcode & IXL1394_OPF_UPDATE) != 0) {
1328 		wvp->xfer_hci_flush |= UPDATEABLE_XFER;
1329 	}
1330 	/*
1331 	 * also set cache flush flag if xfer being finalized by
1332 	 * updateable jump cmd
1333 	 */
1334 	if (ixlopcode == IXL1394_OP_JUMP_U) {
1335 		wvp->xfer_hci_flush |= UPDATEABLE_JUMP;
1336 	}
1337 
1338 	/*
1339 	 * Determine if cache flush required before building next descriptor
1340 	 * block. If xfer pkt command and any cache flush flags are set,
1341 	 * hci flush needed.
1342 	 * If buffer or special xfer command and xfer command is updateable or
1343 	 * an associated set command is updateable, hci flush is required now.
1344 	 * If a single-xfer buffer or special xfer command is finalized by
1345 	 * updateable jump command, hci flush is required now.
1346 	 * Note: a cache flush will be required later, before the last
1347 	 * descriptor block of a multi-xfer set of descriptor blocks is built,
1348 	 * if this (non-pkt) xfer is finalized by an updateable jump command.
1349 	 */
1350 	if (wvp->xfer_hci_flush != 0) {
1351 		if (((wvp->ixl_cur_xfer_stp->ixl_opcode &
1352 		    IXL1394_OPTY_XFER_PKT_ST) != 0) || ((wvp->xfer_hci_flush &
1353 			(UPDATEABLE_XFER | UPDATEABLE_SET | INITIATING_LBL)) !=
1354 			0)) {
1355 
1356 			if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
1357 				TNF_PROBE_0_DEBUG(
1358 				    hci1394_finalize_cur_xfer_desc_exit,
1359 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1360 
1361 				/* wvp->dma_bld_error is set by above call */
1362 				return;
1363 			}
1364 		}
1365 	}
1366 
1367 	/*
1368 	 * determine which kind of descriptor block to build based on
1369 	 * xfer state - hdr only, skip cycle, pkt or buf.
1370 	 */
1371 	switch (wvp->xfer_state) {
1372 
1373 	case XFER_PKT:
1374 		if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1375 			hci1394_bld_recv_pkt_desc(wvp);
1376 		} else {
1377 			hci1394_bld_xmit_pkt_desc(wvp);
1378 		}
1379 		break;
1380 
1381 	case XFER_BUF:
1382 		if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1383 			if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
1384 				hci1394_bld_recv_buf_fill_desc(wvp);
1385 			} else {
1386 				hci1394_bld_recv_buf_ppb_desc(wvp);
1387 			}
1388 		} else {
1389 			hci1394_bld_xmit_buf_desc(wvp);
1390 		}
1391 		break;
1392 
1393 	case XMIT_HDRONLY:
1394 	case XMIT_NOPKT:
1395 		hci1394_bld_xmit_hdronly_nopkt_desc(wvp);
1396 		break;
1397 
1398 	default:
1399 		/* internal compiler error */
1400 		TNF_PROBE_2(hci1394_finalize_cur_xfer_desc_internal_error,
1401 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1402 		    "IXL1394_EINTERNAL_ERROR: invalid state", tnf_opaque,
1403 		    ixl_commandp, wvp->ixl_cur_cmdp);
1404 		wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
1405 	}
1406 
1407 	/* return if error */
1408 	if (wvp->dma_bld_error != 0) {
1409 		TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1410 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1411 
1412 		/* wvp->dma_bld_error is set by above call */
1413 		return;
1414 	}
1415 
1416 	/*
1417 	 * if was finalizing IXL jump cmd, set compiler_privatep to
1418 	 * cur xfer IXL cmd
1419 	 */
1420 	if (ixlopraw == IXL1394_OP_JUMP) {
1421 		wvp->ixl_cur_cmdp->compiler_privatep =
1422 		    (void *)wvp->ixl_cur_xfer_stp;
1423 	}
1424 
1425 	/* if cur xfer IXL initiated by IXL label cmd, set flag in xfer_ctl */
1426 	if (wvp->ixl_cur_labelp != NULL) {
1427 		((hci1394_xfer_ctl_t *)
1428 		    (wvp->ixl_cur_xfer_stp->compiler_privatep))->ctl_flags |=
1429 		    XCTL_LABELLED;
1430 		wvp->ixl_cur_labelp = NULL;
1431 	}
1432 
1433 	/*
1434 	 * set any associated IXL set skipmode cmd into xfer_ctl of
1435 	 * cur xfer IXL cmd
1436 	 */
1437 	if (wvp->ixl_setskipmode_cmdp != NULL) {
1438 		((hci1394_xfer_ctl_t *)
1439 		    (wvp->ixl_cur_xfer_stp->compiler_privatep))->skipmodep =
1440 		    wvp->ixl_setskipmode_cmdp;
1441 	}
1442 
1443 	/* set no current xfer start cmd */
1444 	wvp->ixl_cur_xfer_stp = NULL;
1445 
1446 	/* set no current set tag&sync, set skipmode or set syncwait commands */
1447 	wvp->ixl_settagsync_cmdp = NULL;
1448 	wvp->ixl_setskipmode_cmdp = NULL;
1449 	wvp->ixl_setsyncwait_cnt = 0;
1450 
1451 	/* set no currently active descriptor blocks */
1452 	wvp->descriptors = 0;
1453 
1454 	/* reset total packet length and buffers count */
1455 	wvp->xfer_pktlen = 0;
1456 	wvp->xfer_bufcnt = 0;
1457 
1458 	/* reset flush cache evaluation flags */
1459 	wvp->xfer_hci_flush = 0;
1460 
1461 	/* set no xmit descriptor block being built */
1462 	wvp->xfer_state = XFER_NONE;
1463 
1464 	TNF_PROBE_0_DEBUG(hci1394_finalize_cur_xfer_desc_exit,
1465 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1466 }
1467 
1468 /*
1469  * hci1394_bld_recv_pkt_desc()
1470  *    Used to create the openHCI dma descriptor block(s) for a receive packet.
1471  */
1472 static void
1473 hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1474 {
1475 	hci1394_xfer_ctl_t	*xctlp;
1476 	caddr_t			dma_descp;
1477 	uint32_t		dma_desc_bound;
1478 	uint32_t		wait_for_sync;
1479 	uint32_t		ii;
1480 	hci1394_desc_t		*wv_descp;	/* shorthand to local descrpt */
1481 
1482 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_enter,
1483 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1484 
1485 	/*
1486 	 * it is an error if the number of descriptors to be built exceeds
1487 	 * the maximum number allowed in a descriptor block.
1488 	 */
1489 	if ((wvp->descriptors + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1490 
1491 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1492 
1493 		TNF_PROBE_3(hci1394_bld_recv_pkt_desc_fragment_oflo_error,
1494 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1495 		    "IXL1394_EFRAGMENT_OFLO", tnf_opaque, ixl_commandp,
1496 		    wvp->ixl_cur_xfer_stp, tnf_int, frag_count,
1497 		    wvp->descriptors + wvp->xfer_bufcnt);
1498 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1499 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1500 		return;
1501 	}
1502 
1503 	/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1504 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1505 
1506 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1507 
1508 		TNF_PROBE_2(hci1394_bld_recv_pkt_desc_mem_alloc_fail,
1509 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1510 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1511 		    ixl_commandp, wvp->ixl_cur_xfer_stp);
1512 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1513 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1514 		return;
1515 	}
1516 
1517 	/*
1518 	 * save xfer_ctl struct addr in compiler_privatep of
1519 	 * current IXL xfer cmd
1520 	 */
1521 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1522 
1523 	/*
1524 	 * if enabled, set wait for sync flag in first descriptor of
1525 	 * descriptor block
1526 	 */
1527 	if (wvp->ixl_setsyncwait_cnt > 0) {
1528 		wvp->ixl_setsyncwait_cnt = 1;
1529 		wait_for_sync = DESC_W_ENBL;
1530 	} else {
1531 		wait_for_sync = DESC_W_DSABL;
1532 	}
1533 
1534 	/* create descriptor block for this recv packet (xfer status enabled) */
1535 	for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1536 		wv_descp = &wvp->descriptor_block[wvp->descriptors];
1537 
1538 		if (ii == (wvp->xfer_bufcnt - 1)) {
1539 			HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL,
1540 			    DESC_INTR_DSABL, wait_for_sync, wvp->xfer_size[ii]);
1541 		} else {
1542 			HCI1394_INIT_IR_PPB_IMORE(wv_descp, wait_for_sync,
1543 			    wvp->xfer_size[ii]);
1544 		}
1545 		wv_descp->data_addr = wvp->xfer_bufp[ii];
1546 		wv_descp->branch = 0;
1547 		wv_descp->status = (wvp->xfer_size[ii] <<
1548 		    DESC_ST_RESCOUNT_SHIFT) & DESC_ST_RESCOUNT_MASK;
1549 		wvp->descriptors++;
1550 	}
1551 
1552 	/* allocate and copy descriptor block to dma memory */
1553 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1554 	    DDI_SUCCESS) {
1555 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1556 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1557 
1558 		/* wvp->dma_bld_error is set by above function call */
1559 		return;
1560 	}
1561 
1562 	/*
1563 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1564 	 * is last component)
1565 	 */
1566 	xctlp->dma[0].dma_bound = dma_desc_bound;
1567 	xctlp->dma[0].dma_descp =
1568 	    dma_descp + (wvp->xfer_bufcnt - 1) * sizeof (hci1394_desc_t);
1569 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
1570 
1571 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_pkt_desc_exit,
1572 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1573 }
1574 
1575 /*
1576  * hci1394_bld_recv_buf_ppb_desc()
1577  *    Used to create the openHCI dma descriptor block(s) for a receive buf
1578  *    in packet per buffer mode.
1579  */
1580 static void
1581 hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp)
1582 {
1583 	hci1394_xfer_ctl_t	*xctlp;
1584 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
1585 	caddr_t		dma_descp;
1586 	uint32_t	dma_desc_bound;
1587 	uint32_t	pktsize;
1588 	uint32_t	pktcnt;
1589 	uint32_t	wait_for_sync;
1590 	uint32_t	ii;
1591 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1592 
1593 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_enter,
1594 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1595 
1596 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1597 
1598 	/* determine number and size of pkt desc blocks to create */
1599 	pktsize = local_ixl_cur_xfer_stp->pkt_size;
1600 	pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1601 
1602 	/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1603 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1604 
1605 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1606 
1607 		TNF_PROBE_2(hci1394_bld_recv_buf_ppb_desc_mem_alloc_fail,
1608 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1609 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1610 		    ixl_commandp, wvp->ixl_cur_xfer_stp);
1611 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1612 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1613 		return;
1614 	}
1615 
1616 	/*
1617 	 * save xfer_ctl struct addr in compiler_privatep of
1618 	 * current IXL xfer cmd
1619 	 */
1620 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1621 
1622 	/*
1623 	 * if enabled, set wait for sync flag in first descriptor in
1624 	 * descriptor block
1625 	 */
1626 	if (wvp->ixl_setsyncwait_cnt > 0) {
1627 		wvp->ixl_setsyncwait_cnt = 1;
1628 		wait_for_sync = DESC_W_ENBL;
1629 	} else {
1630 		wait_for_sync = DESC_W_DSABL;
1631 	}
1632 
1633 	/* create the first descriptor block for this recv packet */
1634 	/* (one descriptor, with xfer status enabled) */
1635 	wv_descp = &wvp->descriptor_block[wvp->descriptors];
1636 	HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
1637 	    wait_for_sync, pktsize);
1638 	wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1639 	wv_descp->branch = 0;
1640 	wv_descp->status = (pktsize << DESC_ST_RESCOUNT_SHIFT) &
1641 	    DESC_ST_RESCOUNT_MASK;
1642 	wvp->descriptors++;
1643 
1644 	/* useful debug trace info - IXL command, and packet count and size */
1645 	TNF_PROBE_3_DEBUG(hci1394_bld_recv_buf_ppb_desc_recv_buf_info,
1646 	    HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_opaque, ixl_commandp,
1647 	    wvp->ixl_cur_xfer_stp, tnf_int, pkt_count, pktcnt, tnf_int,
1648 	    pkt_size, pktsize);
1649 
1650 	/*
1651 	 * generate as many contiguous descriptor blocks as there are
1652 	 * recv pkts
1653 	 */
1654 	for (ii = 0; ii < pktcnt; ii++) {
1655 
1656 		/* if about to create last descriptor block */
1657 		if (ii == (pktcnt - 1)) {
1658 			/* check and perform any required hci cache flush */
1659 			if (hci1394_flush_end_desc_check(wvp, ii) !=
1660 			    DDI_SUCCESS) {
1661 				TNF_PROBE_1_DEBUG(
1662 				    hci1394_bld_recv_buf_ppb_desc_fl_error,
1663 				    HCI1394_TNF_HAL_INFO_ISOCH, "", tnf_int,
1664 				    for_ii, ii);
1665 				TNF_PROBE_0_DEBUG(
1666 				    hci1394_bld_recv_buf_ppb_desc_exit,
1667 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
1668 
1669 				/* wvp->dma_bld_error is set by above call */
1670 				return;
1671 			}
1672 		}
1673 
1674 		/* allocate and copy descriptor block to dma memory */
1675 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1676 		    &dma_desc_bound) != DDI_SUCCESS) {
1677 
1678 			TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1679 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
1680 
1681 			/* wvp->dma_bld_error is set by above call */
1682 			return;
1683 		}
1684 
1685 		/*
1686 		 * set dma addrs into xfer_ctl struct (unbound addr (kernel
1687 		 * virtual) is last component (descriptor))
1688 		 */
1689 		xctlp->dma[ii].dma_bound = dma_desc_bound;
1690 		xctlp->dma[ii].dma_descp = dma_descp;
1691 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
1692 
1693 		/* advance buffer ptr by pktsize in descriptor block */
1694 		wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
1695 		    pktsize;
1696 	}
1697 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_ppb_desc_exit,
1698 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1699 }
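
/*
 * Illustrative sketch (not part of the driver): the loop above reuses a
 * single in-core descriptor template, copying it out once per packet and
 * sliding data_addr forward by pktsize on each pass, so packet ii maps
 * the buffer slice [ii * pktsize, (ii + 1) * pktsize).  The types and the
 * copy_out callback below are assumed stand-ins, not driver interfaces.
 */
#include <stdint.h>

typedef struct sketch_desc { uint32_t data_addr; } sketch_desc_t;

static void
sketch_emit_ppb_blocks(sketch_desc_t *tmpl, uint32_t pktsize, uint32_t pktcnt,
    void (*copy_out)(const sketch_desc_t *))
{
	uint32_t ii;

	for (ii = 0; ii < pktcnt; ii++) {
		copy_out(tmpl);			/* one dma block per packet */
		tmpl->data_addr += pktsize;	/* advance to next slice */
	}
}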
1700 
1701 /*
1702  * hci1394_bld_recv_buf_fill_desc()
1703  *    Used to create the openHCI dma descriptor block(s) for a receive buf
1704  *    in buffer fill mode.
1705  */
1706 static void
1707 hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp)
1708 {
1709 	hci1394_xfer_ctl_t	*xctlp;
1710 	caddr_t			dma_descp;
1711 	uint32_t		dma_desc_bound;
1712 	uint32_t		wait_for_sync;
1713 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
1714 
1715 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_enter,
1716 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1717 
1718 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1719 
1720 
1721 	/* allocate an xfer_ctl struct including 1 xfer_ctl_dma struct */
1722 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1723 
1724 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1725 
1726 		TNF_PROBE_2(hci1394_bld_recv_buf_fill_desc_mem_alloc_fail,
1727 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1728 		    "IXL1394_EMEM_ALLOC_FAIL: xfer_ctl", tnf_opaque,
1729 		    ixl_commandp, wvp->ixl_cur_xfer_stp);
1730 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
1731 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1732 		return;
1733 	}
1734 
1735 	/*
1736 	 * save xfer_ctl struct addr in compiler_privatep of
1737 	 * current IXL xfer cmd
1738 	 */
1739 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1740 
1741 	/*
1742 	 * if enabled, set wait for sync flag in first descriptor of
1743 	 * descriptor block
1744 	 */
1745 	if (wvp->ixl_setsyncwait_cnt > 0) {
1746 		wvp->ixl_setsyncwait_cnt = 1;
1747 		wait_for_sync = DESC_W_ENBL;
1748 	} else {
1749 		wait_for_sync = DESC_W_DSABL;
1750 	}
1751 
1752 	/*
1753 	 * create descriptor block for this buffer fill mode recv command which
1754 	 * consists of one descriptor with xfer status enabled
1755 	 */
1756 	HCI1394_INIT_IR_BF_IMORE(&wvp->descriptor_block[wvp->descriptors],
1757 	    DESC_INTR_DSABL, wait_for_sync, local_ixl_cur_xfer_stp->size);
1758 
1759 	wvp->descriptor_block[wvp->descriptors].data_addr =
1760 	    local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1761 	wvp->descriptor_block[wvp->descriptors].branch = 0;
1762 	wvp->descriptor_block[wvp->descriptors].status =
1763 	    (local_ixl_cur_xfer_stp->size << DESC_ST_RESCOUNT_SHIFT) &
1764 	    DESC_ST_RESCOUNT_MASK;
1765 	wvp->descriptors++;
1766 
1767 	/* check and perform any required hci cache flush */
1768 	if (hci1394_flush_end_desc_check(wvp, 0) != DDI_SUCCESS) {
1769 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
1770 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1771 
1772 		/* wvp->dma_bld_error is set by above call */
1773 		return;
1774 	}
1775 
1776 	/* allocate and copy descriptor block to dma memory */
1777 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound)
1778 	    != DDI_SUCCESS) {
1779 		TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
1780 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1781 
1782 		/* wvp->dma_bld_error is set by above call */
1783 		return;
1784 	}
1785 
1786 	/*
1787 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1788 	 * is last component).
1789 	 */
1790 	xctlp->dma[0].dma_bound = dma_desc_bound;
1791 	xctlp->dma[0].dma_descp = dma_descp;
1792 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
1793 
1794 	TNF_PROBE_0_DEBUG(hci1394_bld_recv_buf_fill_desc_exit,
1795 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1796 }
1797 
1798 /*
1799  * hci1394_bld_xmit_pkt_desc()
1800  *    Used to create the openHCI dma descriptor block(s) for a transmit packet.
1801  */
1802 static void
1803 hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1804 {
1805 	hci1394_xfer_ctl_t *xctlp;
1806 	hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1807 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1808 	caddr_t		dma_descp;	/* dma bound memory for descriptor */
1809 	uint32_t	dma_desc_bound;
1810 	uint32_t	ii;
1811 
1812 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_enter,
1813 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1814 
1815 	/*
1816 	 * it is an error if the number of descriptors to be built exceeds the
1817 	 * maximum allowed in a descriptor block.  Add 2 for the overhead of
1818 	 * the OMORE-Immediate.
1819 	 */
1820 	if ((wvp->descriptors + 2 + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1821 
1822 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1823 
1824 		TNF_PROBE_3(hci1394_bld_xmit_pkt_desc_fragment_oflo_error,
1825 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1826 		    "IXL1394_EFRAGMENT_OFLO", tnf_opaque, ixl_commandp,
1827 		    wvp->ixl_cur_xfer_stp, tnf_int, frag_count,
1828 		    wvp->descriptors + 2 + wvp->xfer_bufcnt);
1829 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1830 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1831 		return;
1832 	}
1833 
1834 	/* it is an error if the total packet length exceeds 0xFFFF */
1835 	if (wvp->xfer_pktlen > 0xFFFF) {
1836 
1837 		wvp->dma_bld_error = IXL1394_EPKTSIZE_MAX_OFLO;
1838 
1839 		TNF_PROBE_3(hci1394_bld_xmit_pkt_desc_packet_oflo_error,
1840 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1841 		    "IXL1394_EPKTSIZE_MAX_OFLO", tnf_opaque, ixl_commandp,
1842 		    wvp->ixl_cur_xfer_stp, tnf_int, total_pktlen,
1843 		    wvp->xfer_pktlen);
1844 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1845 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1846 		return;
1847 	}
1848 
1849 	/* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1850 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1851 
1852 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1853 
1854 		TNF_PROBE_2(hci1394_bld_xmit_pkt_desc_mem_alloc_fail,
1855 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1856 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1857 		    ixl_commandp, wvp->ixl_cur_cmdp);
1858 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1859 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1860 		return;
1861 	}
1862 
1863 	/*
1864 	 * save xfer_ctl struct addr in compiler_privatep of
1865 	 * current IXL xfer cmd
1866 	 */
1867 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1868 
1869 	/* generate values for the xmit pkt hdrs */
1870 	hci1394_set_xmit_pkt_hdr(wvp);
1871 
1872 	/*
1873 	 * xmit pkt starts with an output more immediate,
1874 	 * a double sized hci1394_desc
1875 	 */
1876 	wv_omi_descp = (hci1394_output_more_imm_t *)
1877 	    (&wvp->descriptor_block[wvp->descriptors]);
1878 	HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1879 
1880 	wv_omi_descp->data_addr = 0;
1881 	wv_omi_descp->branch = 0;
1882 	wv_omi_descp->status = 0;
1883 	wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1884 	wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1885 	wv_omi_descp->q3 = 0;
1886 	wv_omi_descp->q4 = 0;
1887 
1888 	wvp->descriptors += 2;
1889 
1890 	/*
1891 	 * create the required output more hci1394_desc descriptor, then create
1892 	 * an output last hci1394_desc descriptor with xfer status enabled
1893 	 */
1894 	for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1895 		wv_descp = &wvp->descriptor_block[wvp->descriptors];
1896 
1897 		if (ii == (wvp->xfer_bufcnt - 1)) {
1898 			HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL,
1899 			    DESC_INTR_DSABL, wvp->xfer_size[ii]);
1900 		} else {
1901 			HCI1394_INIT_IT_OMORE(wv_descp, wvp->xfer_size[ii]);
1902 		}
1903 		wv_descp->data_addr = wvp->xfer_bufp[ii];
1904 		wv_descp->branch = 0;
1905 		wv_descp->status = 0;
1906 		wvp->descriptors++;
1907 	}
1908 
1909 	/* allocate and copy descriptor block to dma memory */
1910 	if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1911 	    DDI_SUCCESS) {
1912 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1913 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1914 
1915 		/* wvp->dma_bld_error is set by above call */
1916 		return;
1917 	}
1918 
1919 	/*
1920 	 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1921 	 * is last component (descriptor))
1922 	 */
1923 	xctlp->dma[0].dma_bound = dma_desc_bound;
1924 	xctlp->dma[0].dma_descp =
1925 	    dma_descp + (wvp->xfer_bufcnt + 1) * sizeof (hci1394_desc_t);
1926 	xctlp->dma[0].dma_buf	= &wvp->dma_currentp->mem;
1927 
1928 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_pkt_desc_exit,
1929 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1930 }
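
/*
 * Note: an output more immediate is a double-sized descriptor -- its
 * q1..q4 header payload occupies a second 16-byte slot -- which is why
 * wvp->descriptors is advanced by 2 above.  It is also why the stored
 * dma_descp points (xfer_bufcnt + 1) descriptor slots past the block
 * base: slots 0 and 1 hold the immediate, and the last data descriptor,
 * which carries the xfer status, sits at slot (xfer_bufcnt + 1).
 */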
1931 
1932 /*
1933  * hci1394_bld_xmit_buf_desc()
1934  *    Used to create the openHCI dma descriptor blocks for a transmit buffer.
1935  */
1936 static void
1937 hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp)
1938 {
1939 	hci1394_xfer_ctl_t	*xctlp;
1940 	ixl1394_xfer_buf_t	*local_ixl_cur_xfer_stp;
1941 	hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1942 	hci1394_desc_t	*wv_descp;	/* shorthand to local descriptor */
1943 	caddr_t		dma_descp;
1944 	uint32_t	dma_desc_bound;
1945 	uint32_t	pktsize;
1946 	uint32_t	pktcnt;
1947 	uint32_t	ii;
1948 
1949 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_enter,
1950 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
1951 
1952 	local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1953 
1954 	/* determine number and size of pkt desc blocks to create */
1955 	pktsize = local_ixl_cur_xfer_stp->pkt_size;
1956 	pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1957 
1958 	/* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1959 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1960 
1961 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1962 
1963 		TNF_PROBE_2(hci1394_bld_xmit_buf_desc_mem_alloc_fail,
1964 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
1965 		    "IXL1394_EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
1966 		    ixl_commandp, wvp->ixl_cur_cmdp);
1967 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
1968 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
1969 		return;
1970 	}
1971 
1972 	/*
1973 	 * save xfer_ctl struct addr in compiler_privatep of
1974 	 * current IXL xfer cmd
1975 	 */
1976 	local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1977 
1978 	/* generate values for the xmit pkt hdrs */
1979 	wvp->xfer_pktlen = pktsize;
1980 	hci1394_set_xmit_pkt_hdr(wvp);
1981 
1982 	/*
1983 	 * xmit pkt starts with an output more immediate,
1984 	 * a double sized hci1394_desc
1985 	 */
1986 	wv_omi_descp = (hci1394_output_more_imm_t *)
1987 	    &wvp->descriptor_block[wvp->descriptors];
1988 
1989 	HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1990 
1991 	wv_omi_descp->data_addr = 0;
1992 	wv_omi_descp->branch = 0;
1993 	wv_omi_descp->status = 0;
1994 	wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1995 	wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1996 	wv_omi_descp->q3 = 0;
1997 	wv_omi_descp->q4 = 0;
1998 
1999 	wvp->descriptors += 2;
2000 
2001 	/* follow with a single output last descriptor w/status enabled */
2002 	wv_descp = &wvp->descriptor_block[wvp->descriptors];
2003 	HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
2004 	    pktsize);
2005 	wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
2006 	wv_descp->branch = 0;
2007 	wv_descp->status = 0;
2008 	wvp->descriptors++;
2009 
2010 	/*
2011 	 * generate as many contiguous descriptor blocks as there are
2012 	 * xmit packets
2013 	 */
2014 	for (ii = 0; ii < pktcnt; ii++) {
2015 
2016 		/* if about to create last descriptor block */
2017 		if (ii == (pktcnt - 1)) {
2018 			/* check and perform any required hci cache flush */
2019 			if (hci1394_flush_end_desc_check(wvp, ii) !=
2020 			    DDI_SUCCESS) {
2021 				TNF_PROBE_0_DEBUG(
2022 				    hci1394_bld_xmit_buf_desc_exit,
2023 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2024 
2025 				/* wvp->dma_bld_error is set by above call */
2026 				return;
2027 			}
2028 		}
2029 
2030 		/* allocate and copy descriptor block to dma memory */
2031 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
2032 		    &dma_desc_bound) != DDI_SUCCESS) {
2033 			TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
2034 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2035 
2036 			/* wvp->dma_bld_error is set by above call */
2037 			return;
2038 		}
2039 
2040 		/*
2041 		 * set dma addrs into xfer_ctl structure (unbound addr
2042 		 * (kernel virtual) is last component (descriptor))
2043 		 */
2044 		xctlp->dma[ii].dma_bound = dma_desc_bound;
2045 		xctlp->dma[ii].dma_descp = dma_descp + 2 *
2046 		    sizeof (hci1394_desc_t);
2047 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
2048 
2049 		/* advance buffer ptr by pktsize in descriptor block */
2050 		wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
2051 		    pktsize;
2052 	}
2053 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_buf_desc_exit,
2054 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2055 }
2056 
2057 /*
2058  * hci1394_bld_xmit_hdronly_nopkt_desc()
2059  *    Used to create the openHCI dma descriptor blocks for transmitting
2060  *    a packet consisting of an isochronous header with no data payload,
2061  *    or for not sending a packet at all for a cycle.
2062  *
2063  *    A Store_Value openhci descriptor is built at the start of each
2064  *    IXL1394_OP_SEND_HDR_ONLY and IXL1394_OP_SEND_NO_PKT command's dma
2065  *    descriptor block (to allow for skip cycle specification and set skipmode
2066  *    processing for these commands).
2067  */
2068 static void
2069 hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp)
2070 {
2071 	hci1394_xfer_ctl_t	*xctlp;
2072 	hci1394_output_last_t	*wv_ol_descp; /* shorthand to local descrp */
2073 	hci1394_output_last_imm_t *wv_oli_descp; /* shorthand to local descrp */
2074 	caddr_t		dma_descp;
2075 	uint32_t	dma_desc_bound;
2076 	uint32_t	repcnt;
2077 	uint32_t	ii;
2078 
2079 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_enter,
2080 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2081 
2082 	/* determine # of instances of output hdronly/nopkt to generate */
2083 	repcnt = ((ixl1394_xmit_special_t *)wvp->ixl_cur_xfer_stp)->count;
2084 
2085 	/*
2086 	 * allocate an xfer_ctl structure which includes repcnt
2087 	 * xfer_ctl_dma structs
2088 	 */
2089 	if ((xctlp = hci1394_alloc_xfer_ctl(wvp, repcnt)) == NULL) {
2090 
2091 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2092 
2093 		TNF_PROBE_2(hci1394_bld_xmit_hdronly_nopkt_desc_mem_alloc_fail,
2094 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2095 		    "IXL EMEM_ALLOC_FAIL: for xfer_ctl", tnf_opaque,
2096 		    ixl_commandp, wvp->ixl_cur_cmdp);
2097 		TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2098 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2099 		return;
2100 	}
2101 
2102 	/*
2103 	 * save xfer_ctl struct addr in compiler_privatep of
2104 	 * current IXL xfer command
2105 	 */
2106 	wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
2107 
2108 	/*
2109 	 * create a storevalue descriptor
2110 	 * (will be used for skip vs jump processing)
2111 	 */
2112 	hci1394_set_xmit_storevalue_desc(wvp);
2113 
2114 	/*
2115 	 * processing now based on opcode:
2116 	 * IXL1394_OP_SEND_HDR_ONLY or IXL1394_OP_SEND_NO_PKT
2117 	 */
2118 	if ((wvp->ixl_cur_xfer_stp->ixl_opcode & ~IXL1394_OPF_UPDATE) ==
2119 	    IXL1394_OP_SEND_HDR_ONLY) {
2120 
2121 		/* for header only, generate values for the xmit pkt hdrs */
2122 		hci1394_set_xmit_pkt_hdr(wvp);
2123 
2124 		/*
2125 		 * create an output last immediate (double sized) descriptor
2126 		 * xfer status enabled
2127 		 */
2128 		wv_oli_descp = (hci1394_output_last_imm_t *)
2129 		    &wvp->descriptor_block[wvp->descriptors];
2130 
2131 		HCI1394_INIT_IT_OLAST_IMM(wv_oli_descp, DESC_HDR_STAT_ENBL,
2132 		    DESC_INTR_DSABL);
2133 
2134 		wv_oli_descp->data_addr = 0;
2135 		wv_oli_descp->branch = 0;
2136 		wv_oli_descp->status = 0;
2137 		wv_oli_descp->q1 = wvp->xmit_pkthdr1;
2138 		wv_oli_descp->q2 = wvp->xmit_pkthdr2;
2139 		wv_oli_descp->q3 = 0;
2140 		wv_oli_descp->q4 = 0;
2141 		wvp->descriptors += 2;
2142 	} else {
2143 		/*
2144 		 * for skip cycle, create a single output last descriptor
2145 		 * with xfer status enabled
2146 		 */
2147 		wv_ol_descp = &wvp->descriptor_block[wvp->descriptors];
2148 		HCI1394_INIT_IT_OLAST(wv_ol_descp, DESC_HDR_STAT_ENBL,
2149 		    DESC_INTR_DSABL, 0);
2150 		wv_ol_descp->data_addr = 0;
2151 		wv_ol_descp->branch = 0;
2152 		wv_ol_descp->status = 0;
2153 		wvp->descriptors++;
2154 	}
2155 
2156 	/*
2157 	 * generate as many contiguous descriptor blocks as repeat count
2158 	 * indicates
2159 	 */
2160 	for (ii = 0; ii < repcnt; ii++) {
2161 
2162 		/* if about to create last descriptor block */
2163 		if (ii == (repcnt - 1)) {
2164 			/* check and perform any required hci cache flush */
2165 			if (hci1394_flush_end_desc_check(wvp, ii) !=
2166 			    DDI_SUCCESS) {
2167 				TNF_PROBE_0_DEBUG(
2168 				    hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2169 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2170 
2171 				/* wvp->dma_bld_error is set by above call */
2172 				return;
2173 			}
2174 		}
2175 
2176 		/* allocate and copy descriptor block to dma memory */
2177 		if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
2178 		    &dma_desc_bound) != DDI_SUCCESS) {
2179 			TNF_PROBE_0_DEBUG(
2180 			    hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2181 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2182 
2183 			/* wvp->dma_bld_error is set by above call */
2184 			return;
2185 		}
2186 
2187 		/*
2188 		 * set dma addrs into xfer_ctl structure (unbound addr
2189 		 * (kernel virtual) is last component (descriptor))
2190 		 */
2191 		xctlp->dma[ii].dma_bound = dma_desc_bound;
2192 		xctlp->dma[ii].dma_descp = dma_descp + sizeof (hci1394_desc_t);
2193 		xctlp->dma[ii].dma_buf	 = &wvp->dma_currentp->mem;
2194 	}
2195 	TNF_PROBE_0_DEBUG(hci1394_bld_xmit_hdronly_nopkt_desc_exit,
2196 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2197 }
2198 
2199 /*
2200  * hci1394_bld_dma_mem_desc_blk()
2201  *    Used to put a given OpenHCI descriptor block into dma bound memory.
2202  */
2203 static int
2204 hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp, caddr_t *dma_descpp,
2205     uint32_t *dma_desc_bound)
2206 {
2207 	uint32_t	dma_bound;
2208 
2209 	TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_enter,
2210 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2211 
2212 	/* set internal error if no descriptor blocks to build */
2213 	if (wvp->descriptors == 0) {
2214 
2215 		wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
2216 
2217 		TNF_PROBE_1(hci1394_bld_dma_mem_desc_blk_error,
2218 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2219 		    "IXL1394_INTERNAL_ERROR: no descriptors to build");
2220 		    "IXL1394_EINTERNAL_ERROR: no descriptors to build");
2221 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2222 		return (DDI_FAILURE);
2223 	}
2224 
2225 	/* allocate dma memory and move this descriptor block to it */
2226 	*dma_descpp = (caddr_t)hci1394_alloc_dma_mem(wvp, wvp->descriptors *
2227 	    sizeof (hci1394_desc_t), &dma_bound);
2228 
2229 	if (*dma_descpp == NULL) {
2230 
2231 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2232 
2233 		TNF_PROBE_1(hci1394_bld_dma_mem_desc_blk_fail,
2234 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2235 		    "IXL1394_EMEM_ALLOC_FAIL: for descriptors");
2236 		TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
2237 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2238 		return (DDI_FAILURE);
2239 	}
2240 #ifdef _KERNEL
2241 	ddi_rep_put32(wvp->dma_currentp->mem.bi_handle,
2242 	    (uint_t *)wvp->descriptor_block, (uint_t *)*dma_descpp,
2243 	    wvp->descriptors * (sizeof (hci1394_desc_t) >> 2),
2244 	    DDI_DEV_AUTOINCR);
2245 #else
2246 	bcopy(wvp->descriptor_block, *dma_descpp,
2247 	    wvp->descriptors * sizeof (hci1394_desc_t));
2248 #endif
2249 	/*
2250 	 * convert allocated block's memory address to bus address space
2251 	 * include properly set Z bits (descriptor count).
2252 	 */
2253 	*dma_desc_bound = (dma_bound & ~DESC_Z_MASK) | wvp->descriptors;
2254 
2255 	TNF_PROBE_0_DEBUG(hci1394_bld_dma_mem_desc_blk_exit,
2256 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2257 
2258 	return (DDI_SUCCESS);
2259 }
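
/*
 * Illustrative sketch (not part of the driver): how the bound address and
 * the Z value (descriptor count) returned above share one 32-bit word.
 * Descriptor blocks are 16-byte aligned, so OpenHCI leaves the low four
 * bits of a branch address free for Z; the mask value below is an assumed
 * stand-in for DESC_Z_MASK.
 */
#include <stdint.h>

#define	SKETCH_Z_MASK	0xFU		/* assumed DESC_Z_MASK equivalent */

static uint32_t
sketch_pack_bound(uint32_t bus_addr, uint32_t desc_count)
{
	/* keep the aligned address bits, store Z in the low four bits */
	return ((bus_addr & ~SKETCH_Z_MASK) | (desc_count & SKETCH_Z_MASK));
}

static uint32_t
sketch_unpack_addr(uint32_t bound)
{
	return (bound & ~SKETCH_Z_MASK);	/* recover the address */
}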
2260 
2261 /*
2262  * hci1394_set_xmit_pkt_hdr()
2263  *    Compose the 2 quadlets for the xmit packet header.
2264  */
2265 static void
2266 hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp)
2267 {
2268 	uint16_t tag;
2269 	uint16_t sync;
2270 
2271 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_pkt_hdr_enter,
2272 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2273 
2274 	/*
2275 	 * choose tag and sync bits for header either from default values or
2276 	 * from currently active set tag and sync IXL command
2277 	 * (clear command after use)
2278 	 */
2279 	if (wvp->ixl_settagsync_cmdp == NULL) {
2280 		tag = wvp->default_tag;
2281 		sync = wvp->default_sync;
2282 	} else {
2283 		tag = wvp->ixl_settagsync_cmdp->tag;
2284 		sync = wvp->ixl_settagsync_cmdp->sync;
2285 		wvp->ixl_settagsync_cmdp = NULL;
2286 	}
2287 	tag &= (DESC_PKT_TAG_MASK >> DESC_PKT_TAG_SHIFT);
2288 	sync &= (DESC_PKT_SY_MASK >> DESC_PKT_SY_SHIFT);
2289 
2290 	/*
2291 	 * build xmit pkt header -
2292 	 * hdr1 has speed, tag, channel number and sync bits
2293 	 * hdr2 has the packet length.
2294 	 */
2295 	wvp->xmit_pkthdr1 = (wvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
2296 	    (tag << DESC_PKT_TAG_SHIFT) | (wvp->ctxtp->isochan <<
2297 	    DESC_PKT_CHAN_SHIFT) | (IEEE1394_TCODE_ISOCH <<
2298 	    DESC_PKT_TCODE_SHIFT) | (sync << DESC_PKT_SY_SHIFT);
2299 
2300 	wvp->xmit_pkthdr2 = wvp->xfer_pktlen << DESC_PKT_DATALEN_SHIFT;
2301 
2302 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_pkt_hdr_exit,
2303 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2304 }
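
/*
 * Illustrative sketch (not part of the driver): the field packing done
 * above, with bit positions assumed from the OHCI 1.1 isochronous
 * transmit header (sy 3:0, tcode 7:4, channel 13:8, tag 15:14, spd 18:16
 * in the first quadlet; data length in bits 31:16 of the second).  The
 * authoritative values are the DESC_PKT_* macros in the driver headers.
 */
#include <stdint.h>

static void
sketch_build_it_hdr(uint32_t spd, uint32_t tag, uint32_t chan, uint32_t sy,
    uint32_t pktlen, uint32_t *hdr1, uint32_t *hdr2)
{
	*hdr1 = ((spd & 0x7) << 16) | ((tag & 0x3) << 14) |
	    ((chan & 0x3F) << 8) |
	    (0xAU << 4) |		/* 0xA: assumed isoch tcode value */
	    (sy & 0xF);
	*hdr2 = (pktlen & 0xFFFFU) << 16;
}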
2305 
2306 /*
2307  * hci1394_set_xmit_skip_mode()
2308  *    Set current skip mode from default or from currently active command.
2309  *    If non-default skip mode command's skip mode is skip to label, find
2310  *    and set xfer start IXL command which follows skip to label into
2311  *    compiler_privatep of set skipmode IXL command.
2312  */
2313 static void
2314 hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp)
2315 {
2316 	int err;
2317 
2318 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_skip_mode_enter,
2319 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2320 
2321 	if (wvp->ixl_setskipmode_cmdp == NULL) {
2322 		wvp->skipmode = wvp->default_skipmode;
2323 		wvp->skiplabelp = wvp->default_skiplabelp;
2324 		wvp->skipxferp = wvp->default_skipxferp;
2325 	} else {
2326 		wvp->skipmode = wvp->ixl_setskipmode_cmdp->skipmode;
2327 		wvp->skiplabelp = wvp->ixl_setskipmode_cmdp->label;
2328 		wvp->skipxferp = NULL;
2329 		if (wvp->skipmode == IXL1394_SKIP_TO_LABEL) {
2330 			err = hci1394_ixl_find_next_exec_xfer(wvp->skiplabelp,
2331 			    NULL, &wvp->skipxferp);
2332 			if (err == DDI_FAILURE) {
2333 				TNF_PROBE_2(hci1394_set_xmit_skip_mode_error,
2334 				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
2335 				    errmsg, "IXL1394_ENO_DATA_PKTS: "
2336 				    "label<->jump loop detected for skiplabel "
2337 				    "w/no xfers", tnf_opaque, setskip_cmdp,
2338 				    wvp->ixl_setskipmode_cmdp);
2339 				wvp->skipxferp = NULL;
2340 				wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
2341 			}
2342 		}
2343 		wvp->ixl_setskipmode_cmdp->compiler_privatep =
2344 		    (void *)wvp->skipxferp;
2345 	}
2346 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_skip_mode_exit,
2347 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2348 }
2349 
2350 /*
2351  * hci1394_set_xmit_storevalue_desc()
2352  *    Set up store_value DMA descriptor.
2353  *    XMIT_HDRONLY or XMIT_NOPKT xfer states use a store value as first
2354  *    descriptor in the descriptor block (to handle skip mode processing)
2355  */
2356 static void
2357 hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp)
2358 {
2359 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_storevalue_desc_enter,
2360 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2361 
2362 	wvp->descriptors++;
2363 
2364 	HCI1394_INIT_IT_STORE(&wvp->descriptor_block[wvp->descriptors - 1],
2365 	    wvp->storevalue_data);
2366 	wvp->descriptor_block[wvp->descriptors - 1].data_addr =
2367 	    wvp->storevalue_bufp;
2368 	wvp->descriptor_block[wvp->descriptors - 1].branch = 0;
2369 	wvp->descriptor_block[wvp->descriptors - 1].status = 0;
2370 
2371 	TNF_PROBE_0_DEBUG(hci1394_set_xmit_storevalue_desc_exit,
2372 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2373 }
2374 
2375 /*
2376  * hci1394_set_next_xfer_buf()
2377  *    This routine adds the data buffer to the current wvp list.
2378  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2379  *    contains the error code.
2380  */
2381 static int
2382 hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp, uint32_t bufp,
2383     uint16_t size)
2384 {
2385 	TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_enter,
2386 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2387 
2388 	/* error if buffer pointer is null (size may be 0) */
2389 	if (bufp == NULL) {
2390 
2391 		wvp->dma_bld_error = IXL1394_ENULL_BUFFER_ADDR;
2392 
2393 		TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2394 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2395 		return (DDI_FAILURE);
2396 	}
2397 
2398 	/* count new xfer buffer */
2399 	wvp->xfer_bufcnt++;
2400 
2401 	/* error if exceeds maximum xfer buffer components allowed */
2402 	if (wvp->xfer_bufcnt > HCI1394_DESC_MAX_Z) {
2403 
2404 		wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
2405 
2406 		TNF_PROBE_2(hci1394_set_next_xfer_buf_error,
2407 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2408 		    "IXL1394_EFRAGMENT_OFLO", tnf_int, frag_count,
2409 		    wvp->xfer_bufcnt);
2410 		TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2411 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2412 		return (DDI_FAILURE);
2413 	}
2414 
2415 	/* save xmit buffer and size */
2416 	wvp->xfer_bufp[wvp->xfer_bufcnt - 1] = bufp;
2417 	wvp->xfer_size[wvp->xfer_bufcnt - 1] = size;
2418 
2419 	/* accumulate total packet length */
2420 	wvp->xfer_pktlen += size;
2421 
2422 	TNF_PROBE_0_DEBUG(hci1394_set_next_xfer_buf_exit,
2423 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2424 	return (DDI_SUCCESS);
2425 }
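
/*
 * Illustrative sketch (not part of the driver): the bookkeeping above in
 * miniature -- append a (buffer, size) pair and fail on a null buffer or
 * once the fragment count would exceed the per-block maximum Z.  The
 * names and the max value below are assumed stand-ins.
 */
#include <stdint.h>

#define	SKETCH_MAX_Z	8	/* assumed stand-in for HCI1394_DESC_MAX_Z */

typedef struct sketch_xfer {
	uint32_t	bufp[SKETCH_MAX_Z];
	uint16_t	size[SKETCH_MAX_Z];
	uint32_t	bufcnt;		/* fragments accumulated so far */
	uint32_t	pktlen;		/* running total packet length */
} sketch_xfer_t;

static int
sketch_add_buf(sketch_xfer_t *x, uint32_t bufp, uint16_t size)
{
	if (bufp == 0 || x->bufcnt >= SKETCH_MAX_Z)
		return (-1);	/* null buffer addr or fragment overflow */
	x->bufp[x->bufcnt] = bufp;
	x->size[x->bufcnt] = size;
	x->bufcnt++;
	x->pktlen += size;
	return (0);
}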
2426 
2427 /*
2428  * hci1394_flush_end_desc_check()
2429  *    Check if a flush is required before the last descriptor block of a
2430  *    non-unary set generated by an xfer buf or xmit special command, or
2431  *    of a unary set, provided no other flush has already been done.
2432  *
2433  *    hci flush is required if xfer is finalized by an updateable
2434  *    jump command.
2435  *
2436  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2437  *    will contain the error code.
2438  */
2439 static int
2440 hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp, uint32_t count)
2441 {
2442 	TNF_PROBE_0_DEBUG(hci1394_flush_end_desc_check_enter,
2443 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2444 
2445 	if ((count != 0) ||
2446 	    ((wvp->xfer_hci_flush & (UPDATEABLE_XFER | UPDATEABLE_SET |
2447 		INITIATING_LBL)) == 0)) {
2448 
2449 		if (wvp->xfer_hci_flush & UPDATEABLE_JUMP) {
2450 			if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
2451 
2452 				TNF_PROBE_0_DEBUG(
2453 				    hci1394_flush_end_desc_check_exit,
2454 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2455 
2456 				/* wvp->dma_bld_error is set by above call */
2457 				return (DDI_FAILURE);
2458 			}
2459 		}
2460 	}
2461 
2462 	TNF_PROBE_0_DEBUG(hci1394_flush_end_desc_check_exit,
2463 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2464 	return (DDI_SUCCESS);
2465 }
2466 
2467 /*
2468  * hci1394_flush_hci_cache()
2469  *    Sun hci controller (RIO) implementation specific processing!
2470  *
2471  *    Allocate dma memory for 1 hci descriptor block which will be left unused.
2472  *    During execution this will cause a break in the contiguous address space
2473  *    processing required by Sun's RIO implementation of the ohci controller and
2474  *    will require the controller to refetch the next descriptor block from
2475  *    host memory.
2476  *
2477  *    General rules for cache flush preceding a descriptor block in dma memory:
2478  *    1. Current IXL Xfer Command Updateable Rule:
2479  *	    Cache flush of IXL xfer command is required if it, or any of the
2480  *	    non-start IXL packet xfer commands associated with it, is flagged
2481  *	    updateable.
2482  *    2. Next IXL Xfer Command Indeterminate Rule:
2483  *	    Cache flush of IXL xfer command is required if an IXL jump command
2484  *	    which is flagged updateable has finalized the current IXL xfer
2485  *	    command.
2486  *    3. Updateable IXL Set Command Rule:
2487  *	    Cache flush of an IXL xfer command is required if any of the IXL
2488  *	    "Set" commands (IXL1394_OP_SET_*) associated with the IXL xfer
2489  *	    command (i.e. immediately preceding it), is flagged updateable.
2490  *    4. Label Initiating Xfer Command Rule:
2491  *	    Cache flush of IXL xfer command is required if it is initiated by a
2492  *	    label IXL command.  (This allows both a cache flush and an
2493  *	    interrupt to be generated easily and in close proximity to each
2494  *	    other, which makes simpler, more reliable resetting of
2495  *	    descriptor statuses possible, especially under circumstances
2496  *	    where the cycle of hci commands is short and/or there are no
2497  *	    callbacks distributed through the span of xfers.  This is
2498  *	    especially important for input, where statuses must be reset
2499  *	    before execution cycles back again.)
2500  *
2501  *    Application of above rules:
2502  *    Packet mode IXL xfer commands:
2503  *	    If any of the above flush rules apply, a cache flush should be done
2504  *	    immediately preceding the generation of the dma descriptor block
2505  *	    for the packet xfer.
2506  *    Non-packet mode IXL xfer commands (including IXL1394_OP_*BUF*,
2507  *    SEND_HDR_ONLY, and SEND_NO_PKT):
2508  *	    If Rule #1, #3, or #4 applies, a cache flush should be done
2509  *	    immediately before the first generated dma descriptor block of the
2510  *	    non-packet xfer.
2511  *	    If Rule #2 applies, a cache flush should be done immediately before
2512  *	    the last generated dma descriptor block of the non-packet xfer.
2513  *
2514  *    Note: The cache flush should be done at most once in each location that is
2515  *    required to be flushed no matter how many rules apply (i.e. only once
2516  *    before the first descriptor block and/or only once before the last
2517  *    descriptor block generated).  If more than one place requires a flush,
2518  *    then both flush operations must be performed.  This is determined by
2519  *    taking all rules that apply into account.
2520  *
2521  *    Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2522  *    will contain the error code.
2523  */
2524 static int
2525 hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp)
2526 {
2527 	uint32_t	dma_bound;
2528 
2529 	TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_enter,
2530 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2531 
2532 	if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t), &dma_bound) ==
2533 	    NULL) {
2534 
2535 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2536 
2537 		TNF_PROBE_1(hci1394_flush_hci_cache_fail,
2538 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2539 		    "IXL1394_EMEM_ALLOC_FAIL: for flush_hci_cache");
2540 		TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_exit,
2541 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2542 		return (DDI_FAILURE);
2543 	}
2544 
2545 	TNF_PROBE_0_DEBUG(hci1394_flush_hci_cache_exit,
2546 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2547 	return (DDI_SUCCESS);
2548 }
2549 
2550 /*
2551  * hci1394_alloc_storevalue_dma_mem()
2552  *    Allocate dma memory for a single hci descriptor block which
2553  *    will be used as the dma memory location that ixl compiler
2554  *    generated storevalue descriptor commands will specify as the
2555  *    location in which to store their data value.
2556  *
2557  *    Returns 32-bit bound address of allocated mem, or NULL.
2558  */
2559 static uint32_t
2560 hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp)
2561 {
2562 	uint32_t	dma_bound;
2563 
2564 	TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_enter,
2565 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2566 
2567 	if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t),
2568 	    &dma_bound) == NULL) {
2569 
2570 		wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2571 
2572 		TNF_PROBE_2(hci1394_bld_alloc_storevalue_dma_mem_alloc_fail,
2573 		    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
2574 		    "IXL1394_EMEM_ALLOC_FAIL: for storevalue dma",
2575 		    tnf_opaque, ixl_commandp, wvp->ixl_cur_cmdp);
2576 		TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_exit,
2577 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2578 		return (NULL);
2579 	}
2580 
2581 	TNF_PROBE_0_DEBUG(hci1394_alloc_storevalue_dma_mem_exit,
2582 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2583 
2584 	/* return bound address of allocated memory */
2585 	return (dma_bound);
2586 }
2587 
2588 
2589 /*
2590  * hci1394_alloc_xfer_ctl()
2591  *    Allocate an xfer_ctl structure.
2592  */
2593 static hci1394_xfer_ctl_t *
2594 hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp, uint32_t dmacnt)
2595 {
2596 	hci1394_xfer_ctl_t *xcsp;
2597 
2598 	TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_enter,
2599 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2600 
2601 	/*
2602 	 * allocate an xfer_ctl struct which includes dmacnt of
2603 	 * xfer_ctl_dma structs
2604 	 */
2605 #ifdef _KERNEL
2606 	if ((xcsp = (hci1394_xfer_ctl_t *)kmem_zalloc(
2607 	    (sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2608 	    sizeof (hci1394_xfer_ctl_dma_t)), KM_NOSLEEP)) == NULL) {
2609 
2610 		TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2611 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2612 		return (NULL);
2613 	}
2614 #else
2615 	/*
2616 	 * This section makes it possible to easily run and test the compiler in
2617 	 * user mode.
2618 	 */
2619 	if ((xcsp = (hci1394_xfer_ctl_t *)calloc(1,
2620 	    sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2621 	    sizeof (hci1394_xfer_ctl_dma_t))) == NULL) {
2622 
2623 		TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2624 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2625 		return (NULL);
2626 	}
2627 #endif
2628 	/*
2629 	 * set dma structure count into allocated xfer_ctl struct for
2630 	 * later deletion.
2631 	 */
2632 	xcsp->cnt = dmacnt;
2633 
2634 	/* link it to previously allocated xfer_ctl structs or set as first */
2635 	if (wvp->xcs_firstp == NULL) {
2636 		wvp->xcs_firstp = wvp->xcs_currentp = xcsp;
2637 	} else {
2638 		wvp->xcs_currentp->ctl_nextp = xcsp;
2639 		wvp->xcs_currentp = xcsp;
2640 	}
2641 
2642 	TNF_PROBE_0_DEBUG(hci1394_alloc_xfer_ctl_exit,
2643 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2644 
2645 	/* return allocated xfer_ctl structure */
2646 	return (xcsp);
2647 }
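
/*
 * Illustrative sketch (not part of the driver): the allocation above
 * relies on hci1394_xfer_ctl_t ending in a one-element dma[] array and
 * over-allocating by (dmacnt - 1) extra entries -- the classic C89
 * "struct hack".  The types below are assumed stand-ins; dmacnt is
 * assumed to be at least 1, as in the callers above.
 */
#include <stdlib.h>
#include <stdint.h>

typedef struct sketch_ctl_dma { uint32_t dma_bound; } sketch_ctl_dma_t;

typedef struct sketch_ctl {
	uint32_t		cnt;	/* number of dma[] entries */
	sketch_ctl_dma_t	dma[1];	/* grows to cnt entries */
} sketch_ctl_t;

static sketch_ctl_t *
sketch_alloc_ctl(uint32_t dmacnt)
{
	sketch_ctl_t *p;

	p = calloc(1, sizeof (sketch_ctl_t) +
	    (dmacnt - 1) * sizeof (sketch_ctl_dma_t));
	if (p != NULL)
		p->cnt = dmacnt;	/* remembered for later deletion */
	return (p);
}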
2648 
2649 /*
2650  * hci1394_alloc_dma_mem()
2651  *	Allocates and binds memory for openHCI DMA descriptors as needed.
2652  */
2653 static void *
2654 hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp, uint32_t size,
2655     uint32_t *dma_bound)
2656 {
2657 	hci1394_idma_desc_mem_t *dma_new;
2658 	hci1394_buf_parms_t parms;
2659 	hci1394_buf_info_t *memp;
2660 	void	*dma_mem_ret;
2661 	int	ret;
2662 
2663 	TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_enter,
2664 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2665 
2666 	/*
2667 	 * if no dma has been allocated or current request exceeds
2668 	 * remaining memory
2669 	 */
2670 	if ((wvp->dma_currentp == NULL) ||
2671 	    (size > (wvp->dma_currentp->mem.bi_cookie.dmac_size -
2672 		wvp->dma_currentp->used))) {
2673 #ifdef _KERNEL
2674 		/* kernel-mode memory allocation for driver */
2675 
2676 		/* allocate struct to track more dma descriptor memory */
2677 		if ((dma_new = (hci1394_idma_desc_mem_t *)
2678 		    kmem_zalloc(sizeof (hci1394_idma_desc_mem_t),
2679 		    KM_NOSLEEP)) == NULL) {
2680 
2681 			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2682 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2683 			return (NULL);
2684 		}
2685 
2686 		/*
2687 		 * if more cookies available from the current mem, try to find
2688 		 * one of suitable size. Cookies that are too small will be
2689 		 * skipped and unused. Given that cookie size is always at least
2690 		 * 1 page long and HCI1394_DESC_MAX_Z is much smaller than that,
2691 		 * it's a small price to pay for code simplicity.
2692 		 */
2693 		if (wvp->dma_currentp != NULL) {
2694 			/* new struct is derived from current */
2695 			memp = &wvp->dma_currentp->mem;
2696 			dma_new->mem = *memp;
2697 			dma_new->offset = wvp->dma_currentp->offset +
2698 			    memp->bi_cookie.dmac_size;
2699 
2700 			for (; memp->bi_cookie_count > 1;
2701 			    memp->bi_cookie_count--) {
2702 				ddi_dma_nextcookie(memp->bi_dma_handle,
2703 				    &dma_new->mem.bi_cookie);
2704 
2705 				if (dma_new->mem.bi_cookie.dmac_size >= size) {
2706 					dma_new->mem_handle =
2707 					    wvp->dma_currentp->mem_handle;
2708 					wvp->dma_currentp->mem_handle = NULL;
2709 					dma_new->mem.bi_cookie_count--;
2710 					break;
2711 				}
2712 				dma_new->offset +=
2713 				    dma_new->mem.bi_cookie.dmac_size;
2714 			}
2715 		}
2716 
2717 		/* if no luck with current buffer, allocate a new one */
2718 		if (dma_new->mem_handle == NULL) {
2719 			parms.bp_length = HCI1394_IXL_PAGESIZE;
2720 			parms.bp_max_cookies = OHCI_MAX_COOKIE;
2721 			parms.bp_alignment = 16;
2722 			ret = hci1394_buf_alloc(&wvp->soft_statep->drvinfo,
2723 			    &parms, &dma_new->mem, &dma_new->mem_handle);
2724 			if (ret != DDI_SUCCESS) {
2725 				kmem_free(dma_new,
2726 				    sizeof (hci1394_idma_desc_mem_t));
2727 
2728 				TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2729 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2730 				return (NULL);
2731 			}
2732 
2733 			/* paranoia: this is not supposed to happen */
2734 			if (dma_new->mem.bi_cookie.dmac_size < size) {
2735 				hci1394_buf_free(&dma_new->mem_handle);
2736 				kmem_free(dma_new,
2737 				    sizeof (hci1394_idma_desc_mem_t));
2738 
2739 				TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2740 				    HCI1394_TNF_HAL_STACK_ISOCH, "");
2741 				return (NULL);
2742 			}
2743 			dma_new->offset = 0;
2744 		}
2745 #else
2746 		/* user-mode memory allocation for user mode compiler tests */
2747 		/* allocate another dma_desc_mem struct */
2748 		if ((dma_new = (hci1394_idma_desc_mem_t *)
2749 			calloc(1, sizeof (hci1394_idma_desc_mem_t))) == NULL) {
2750 			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2751 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2752 			return (NULL);
2753 		}
2754 		dma_new->mem.bi_dma_handle = NULL;
2755 		dma_new->mem.bi_handle = NULL;
2756 		if ((dma_new->mem.bi_kaddr = (caddr_t)calloc(1,
2757 			    HCI1394_IXL_PAGESIZE)) == NULL) {
2758 			TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2759 			    HCI1394_TNF_HAL_STACK_ISOCH, "");
2760 			return (NULL);
2761 		}
2762 		dma_new->mem.bi_cookie.dmac_address =
2763 		    (unsigned long)dma_new->mem.bi_kaddr;
2764 		dma_new->mem.bi_real_length = HCI1394_IXL_PAGESIZE;
2765 		dma_new->mem.bi_cookie_count = 1;
2766 #endif
2767 
2768 		/* if this is not first dma_desc_mem, link last one to it */
2769 		if (wvp->dma_currentp != NULL) {
2770 			wvp->dma_currentp->dma_nextp = dma_new;
2771 			wvp->dma_currentp = dma_new;
2772 		} else {
2773 			/* else set it as first one */
2774 			wvp->dma_currentp = wvp->dma_firstp = dma_new;
2775 		}
2776 	}
2777 
2778 	/* now allocate requested memory from current block */
2779 	dma_mem_ret = wvp->dma_currentp->mem.bi_kaddr +
2780 	    wvp->dma_currentp->offset + wvp->dma_currentp->used;
2781 	*dma_bound = wvp->dma_currentp->mem.bi_cookie.dmac_address +
2782 	    wvp->dma_currentp->used;
2783 	wvp->dma_currentp->used += size;
2784 
2785 	TNF_PROBE_0_DEBUG(hci1394_alloc_dma_mem_exit,
2786 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2787 	return (dma_mem_ret);
2788 }
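
/*
 * Illustrative sketch (not part of the driver): once a usable block
 * exists, the tail of the function above is a plain bump allocator --
 * hand out the next 'size' bytes of the block and advance 'used', with
 * the bound (bus) address kept in lockstep.  Names below are assumed
 * stand-ins.
 */
#include <stdint.h>

typedef struct sketch_blk {
	char		*kaddr;		/* kernel virtual base of block */
	uint32_t	bus_addr;	/* bound (bus) base address */
	uint32_t	used;		/* bytes already handed out */
} sketch_blk_t;

static void *
sketch_bump_alloc(sketch_blk_t *blk, uint32_t size, uint32_t *bus_out)
{
	void *ret;

	ret = blk->kaddr + blk->used;
	*bus_out = blk->bus_addr + blk->used;	/* matching bus address */
	blk->used += size;
	return (ret);
}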
2789 
2790 
2791 /*
2792  * hci1394_is_opcode_valid()
2793  *    given an ixl opcode, this routine returns B_TRUE if it is a
2794  *    recognized opcode and B_FALSE if it is not recognized.
2795  *    Note that the FULL 16 bits of the opcode are checked which includes
2796  *    various flags and not just the low order 8 bits of unique code.
2797  */
2798 static boolean_t
2799 hci1394_is_opcode_valid(uint16_t ixlopcode)
2800 {
2801 	TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_enter,
2802 	    HCI1394_TNF_HAL_STACK_ISOCH, "");
2803 
2804 	/* if it's not one we know about, then it's bad */
2805 	switch (ixlopcode) {
2806 	case IXL1394_OP_LABEL:
2807 	case IXL1394_OP_JUMP:
2808 	case IXL1394_OP_CALLBACK:
2809 	case IXL1394_OP_RECV_PKT:
2810 	case IXL1394_OP_RECV_PKT_ST:
2811 	case IXL1394_OP_RECV_BUF:
2812 	case IXL1394_OP_SEND_PKT:
2813 	case IXL1394_OP_SEND_PKT_ST:
2814 	case IXL1394_OP_SEND_PKT_WHDR_ST:
2815 	case IXL1394_OP_SEND_BUF:
2816 	case IXL1394_OP_SEND_HDR_ONLY:
2817 	case IXL1394_OP_SEND_NO_PKT:
2818 	case IXL1394_OP_STORE_TIMESTAMP:
2819 	case IXL1394_OP_SET_TAGSYNC:
2820 	case IXL1394_OP_SET_SKIPMODE:
2821 	case IXL1394_OP_SET_SYNCWAIT:
2822 	case IXL1394_OP_JUMP_U:
2823 	case IXL1394_OP_CALLBACK_U:
2824 	case IXL1394_OP_RECV_PKT_U:
2825 	case IXL1394_OP_RECV_PKT_ST_U:
2826 	case IXL1394_OP_RECV_BUF_U:
2827 	case IXL1394_OP_SEND_PKT_U:
2828 	case IXL1394_OP_SEND_PKT_ST_U:
2829 	case IXL1394_OP_SEND_PKT_WHDR_ST_U:
2830 	case IXL1394_OP_SEND_BUF_U:
2831 	case IXL1394_OP_SET_TAGSYNC_U:
2832 	case IXL1394_OP_SET_SKIPMODE_U:
2833 		TNF_PROBE_1_DEBUG(hci1394_is_opcode_valid_info,
2834 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
2835 		    "ixl opcode is valid");
2836 		TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_exit,
2837 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2838 		return (B_TRUE);
2839 	default:
2840 		TNF_PROBE_2(hci1394_is_opcode_valid_error,
2841 		    HCI1394_TNF_HAL_STACK_ISOCH, "", tnf_string, msg,
2842 		    "ixl opcode is NOT valid", tnf_opaque, ixl_opcode,
2843 		    ixlopcode);
2844 		TNF_PROBE_0_DEBUG(hci1394_is_opcode_valid_exit,
2845 		    HCI1394_TNF_HAL_STACK_ISOCH, "");
2846 		return (B_FALSE);
2847 	}
2848 }
2849