1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License, Version 1.0 only
6 * (the "License"). You may not use this file except in compliance
7 * with the License.
8 *
9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 * or http://www.opensolaris.org/os/licensing.
11 * See the License for the specific language governing permissions
12 * and limitations under the License.
13 *
14 * When distributing Covered Code, include this CDDL HEADER in each
15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 * If applicable, add the following below this CDDL HEADER, with the
17 * fields enclosed by brackets "[]" replaced with your own identifying
18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 *
20 * CDDL HEADER END
21 */
22 /*
23 * Copyright 2004 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * hci1394_ixl_comp.c
29 * Isochronous IXL Compiler.
30 * The compiler converts the general hardware independent IXL command
31 * blocks into OpenHCI DMA descriptors.
32 */
33
34 #include <sys/kmem.h>
35 #include <sys/types.h>
36 #include <sys/conf.h>
37 #include <sys/ddi.h>
38 #include <sys/sunddi.h>
39 #include <sys/1394/h1394.h>
40 #include <sys/1394/ixl1394.h>
41 #include <sys/1394/adapters/hci1394.h>
42
43 /* compiler allocation size for DMA descriptors. 8000 is 500 descriptors */
44 #define HCI1394_IXL_PAGESIZE 8000
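/*
 * Worked example of the sizing above: an OpenHCI descriptor
 * (hci1394_desc_t) is 4 quadlets, i.e. 16 bytes, so 8000 / 16 = 500
 * descriptors per compiler allocation, as noted in the comment above.
 */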
45
46 /* invalid opcode */
47 #define IXL1394_OP_INVALID (0 | IXL1394_OPTY_OTHER)
48
49 /*
50 * maximum number of interrupts permitted for a single context in which
51 * the context does not advance to the next DMA descriptor. Interrupts are
52 * triggered by 1) hardware completing a DMA descriptor block which has the
53 * interrupt (i) bits set, 2) a cycle_inconsistent interrupt, or 3) a cycle_lost
54 * interrupt. Once the max is reached, the HCI1394_IXL_INTR_NOADV error is
55 * returned.
56 */
57 int hci1394_ixl_max_noadv_intrs = 8;
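/*
 * Illustrative tuning sketch (not part of this file): since the default
 * above is a patchable global, it could be overridden at boot via
 * /etc/system, e.g.
 *
 *	set hci1394:hci1394_ixl_max_noadv_intrs = 16
 *
 * (the value 16 is an arbitrary example).  The per-context copy is then
 * picked up in hci1394_compile_ixl_init() below.
 */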
58
59
60 static void hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
61 hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
62 ixl1394_command_t *ixlp);
63 static void hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp);
64 static void hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp,
65 ixl1394_command_t *ixlp);
66 static void hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
67 static void hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp);
68 static void hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
69 static void hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp);
70 static void hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp);
71 static void hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp);
72 static void hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp);
73 static void hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp);
74 static int hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp,
75 caddr_t *dma_descpp, uint32_t *dma_desc_bound);
76 static void hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp);
77 static void hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp);
78 static void hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp);
79 static int hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp,
80 uint32_t bufp, uint16_t size);
81 static int hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp,
82 uint32_t count);
83 static int hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp);
84 static uint32_t hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp);
85 static hci1394_xfer_ctl_t *hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp,
86 uint32_t dmacnt);
87 static void *hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp,
88 uint32_t size, uint32_t *dma_bound);
89 static boolean_t hci1394_is_opcode_valid(uint16_t ixlopcode);
90
91
92 /*
93 * FULL LIST OF ACCEPTED IXL COMMAND OPCODES:
94 * Receive Only: Transmit Only:
95 * IXL1394_OP_RECV_PKT_ST IXL1394_OP_SEND_PKT_WHDR_ST
96 * IXL1394_OP_RECV_PKT IXL1394_OP_SEND_PKT_ST
97 * IXL1394_OP_RECV_BUF IXL1394_OP_SEND_PKT
98 * IXL1394_OP_SET_SYNCWAIT IXL1394_OP_SEND_BUF
99 * IXL1394_OP_SEND_HDR_ONLY
100 * Receive or Transmit: IXL1394_OP_SEND_NO_PKT
101 * IXL1394_OP_CALLBACK IXL1394_OP_SET_TAGSYNC
102 * IXL1394_OP_LABEL IXL1394_OP_SET_SKIPMODE
103 * IXL1394_OP_JUMP IXL1394_OP_STORE_TIMESTAMP
104 */
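/*
 * Illustrative IXL program (sketch only, field setup abbreviated): a
 * client might hand hci1394_compile_ixl() a looping packet-per-buffer
 * receive program such as
 *
 *	LABEL L1
 *	RECV_BUF  (size = 4000, pkt_size = 1000)
 *	JUMP  L1
 *
 * which compiles into four single-packet descriptor blocks whose final
 * branch address is resolved back to the first block during the second
 * pass.
 */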
105
106 /*
107 * hci1394_compile_ixl()
108 * Top level ixl compiler entry point. Scans ixl and builds openHCI 1.0
109 * descriptor blocks in dma memory.
110 */
111 int
112 hci1394_compile_ixl(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
113 ixl1394_command_t *ixlp, int *resultp)
114 {
115 hci1394_comp_ixl_vars_t wv; /* working variables used throughout */
116
117 ASSERT(soft_statep != NULL);
118 ASSERT(ctxtp != NULL);
119
120 /* Initialize compiler working variables */
121 hci1394_compile_ixl_init(&wv, soft_statep, ctxtp, ixlp);
122
123 /*
124 * First pass:
125 * Parse ixl commands, building desc blocks, until end of IXL
126 * linked list.
127 */
128 hci1394_parse_ixl(&wv, ixlp);
129
130 /*
131 * Second pass:
132 * Resolve all generated descriptor block jump and skip addresses.
133 * Set interrupt enable in descriptor blocks which have callback
134 * operations in their execution scope. (Previously store_timestamp
135 * operations were counted also.) Set interrupt enable in descriptor
136 * blocks which were introduced by an ixl label command.
137 */
138 if (wv.dma_bld_error == 0) {
139 hci1394_finalize_all_xfer_desc(&wv);
140 }
141
142 /* Endup: finalize and cleanup ixl compile, return result */
143 hci1394_compile_ixl_endup(&wv);
144
145 *resultp = wv.dma_bld_error;
146 if (*resultp != 0)
147 return (DDI_FAILURE);
148 return (DDI_SUCCESS);
149 }
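/*
 * Usage sketch (hypothetical caller, illustration only):
 *
 *	int result;
 *
 *	if (hci1394_compile_ixl(soft_statep, ctxtp, ixlp,
 *	    &result) != DDI_SUCCESS) {
 *		cmn_err(CE_NOTE, "ixl compile failed, ixl1394 err=%d", result);
 *		return (DDI_FAILURE);
 *	}
 *
 * On failure, resultp carries the IXL1394_E* code and any partially built
 * xfer_ctl structures and dma memory have already been cleaned up by
 * hci1394_compile_ixl_endup().
 */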
150
151 /*
152 * hci1394_compile_ixl_init()
153 * Initialize the isoch context structure associated with the IXL
154 * program, and initialize the temporary working variables structure.
155 */
156 static void
157 hci1394_compile_ixl_init(hci1394_comp_ixl_vars_t *wvp,
158 hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp,
159 ixl1394_command_t *ixlp)
160 {
161 /* initialize common recv/xmit compile values */
162 wvp->soft_statep = soft_statep;
163 wvp->ctxtp = ctxtp;
164
165 /* init/clear ctxtp values */
166 ctxtp->dma_mem_execp = 0;
167 ctxtp->dma_firstp = NULL;
168 ctxtp->dma_last_time = 0;
169 ctxtp->xcs_firstp = NULL;
170 ctxtp->ixl_exec_depth = 0;
171 ctxtp->ixl_execp = NULL;
172 ctxtp->ixl_firstp = ixlp;
173 ctxtp->default_skipxferp = NULL;
174
175 /*
176 * the context's max_noadv_intrs is set here instead of in isoch init
177 * because the default is patchable and would only be picked up this way
178 */
179 ctxtp->max_noadv_intrs = hci1394_ixl_max_noadv_intrs;
180
181 /* init working variables */
182 wvp->xcs_firstp = NULL;
183 wvp->xcs_currentp = NULL;
184
185 wvp->dma_firstp = NULL;
186 wvp->dma_currentp = NULL;
187 wvp->dma_bld_error = 0;
188
189 wvp->ixl_io_mode = ctxtp->ctxt_flags;
190 wvp->ixl_cur_cmdp = NULL;
191 wvp->ixl_cur_xfer_stp = NULL;
192 wvp->ixl_cur_labelp = NULL;
193
194 wvp->ixl_xfer_st_cnt = 0; /* count of xfer start commands found */
195 wvp->xfer_state = XFER_NONE; /* none, pkt, buf, skip, hdronly */
196 wvp->xfer_hci_flush = 0; /* updateable - xfer, jump, set */
197 wvp->xfer_pktlen = 0;
198 wvp->xfer_bufcnt = 0;
199 wvp->descriptors = 0;
200
201 /* START RECV ONLY SECTION */
202 wvp->ixl_setsyncwait_cnt = 0;
203
204 /* START XMIT ONLY SECTION */
205 wvp->ixl_settagsync_cmdp = NULL;
206 wvp->ixl_setskipmode_cmdp = NULL;
207 wvp->default_skipmode = ctxtp->default_skipmode; /* nxt,self,stop,jmp */
208 wvp->default_skiplabelp = ctxtp->default_skiplabelp;
209 wvp->default_skipxferp = NULL;
210 wvp->skipmode = ctxtp->default_skipmode;
211 wvp->skiplabelp = NULL;
212 wvp->skipxferp = NULL;
213 wvp->default_tag = ctxtp->default_tag;
214 wvp->default_sync = ctxtp->default_sync;
215 wvp->storevalue_bufp = hci1394_alloc_storevalue_dma_mem(wvp);
216 wvp->storevalue_data = 0;
217 wvp->xmit_pkthdr1 = 0;
218 wvp->xmit_pkthdr2 = 0;
219 /* END XMIT ONLY SECTION */
220 }
221
222 /*
223 * hci1394_compile_ixl_endup()
224 * This routine is called just before the main hci1394_compile_ixl() exits.
225 * It checks for errors and performs the appropriate cleanup, or it rolls any
226 * relevant info from the working variables struct into the context structure.
227 */
228 static void
229 hci1394_compile_ixl_endup(hci1394_comp_ixl_vars_t *wvp)
230 {
231 ixl1394_command_t *ixl_exec_stp;
232 hci1394_idma_desc_mem_t *dma_nextp;
233 int err;
234
235 /* error if no descriptor blocks found in ixl & created in dma memory */
236 if ((wvp->dma_bld_error == 0) && (wvp->ixl_xfer_st_cnt == 0)) {
237 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
238 }
239
240 /* if no errors yet, find the first IXL command that's a transfer cmd */
241 if (wvp->dma_bld_error == 0) {
242 err = hci1394_ixl_find_next_exec_xfer(wvp->ctxtp->ixl_firstp,
243 NULL, &ixl_exec_stp);
244
245 /* error if a label<->jump loop, or no xfer */
246 if ((err == DDI_FAILURE) || (ixl_exec_stp == NULL)) {
247 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
248 }
249 }
250
251 /* Sync all the DMA descriptor buffers */
252 dma_nextp = wvp->ctxtp->dma_firstp;
253 while (dma_nextp != NULL) {
254 err = ddi_dma_sync(dma_nextp->mem.bi_dma_handle,
255 (off_t)dma_nextp->mem.bi_kaddr, dma_nextp->mem.bi_length,
256 DDI_DMA_SYNC_FORDEV);
257 if (err != DDI_SUCCESS) {
258 wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
259
260 break;
261 }
262
263 /* advance to next dma memory descriptor */
264 dma_nextp = dma_nextp->dma_nextp;
265 }
266
267 /*
268 * If error, clean up and return. Delete all allocated xfer_ctl structs
269 * and all dma descriptor page memory and its dma memory blocks too.
270 */
271 if (wvp->dma_bld_error != 0) {
272 wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
273 wvp->ctxtp->dma_firstp = wvp->dma_firstp;
274 hci1394_ixl_cleanup(wvp->soft_statep, wvp->ctxtp);
275
276 return;
277 }
278
279 /* can only get to here if the first ixl transfer command is found */
280
281 /* set required processing vars into ctxtp struct */
282 wvp->ctxtp->default_skipxferp = wvp->default_skipxferp;
283 wvp->ctxtp->dma_mem_execp = 0;
284
285 /*
286 * the transfer command's compiler private xfer_ctl structure has the
287 * appropriate bound address
288 */
289 wvp->ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
290 ixl_exec_stp->compiler_privatep)->dma[0].dma_bound;
291 wvp->ctxtp->xcs_firstp = (void *)wvp->xcs_firstp;
292 wvp->ctxtp->dma_firstp = wvp->dma_firstp;
293 wvp->ctxtp->dma_last_time = 0;
294 wvp->ctxtp->ixl_exec_depth = 0;
295 wvp->ctxtp->ixl_execp = NULL;
296
297 /* compile done */
298 }
299
300 /*
301 * hci1394_parse_ixl()
302 * Scan IXL program and build ohci DMA descriptor blocks in dma memory.
303 *
304 * Parse/process succeeding ixl commands until end of IXL linked list is
305 * reached. Evaluate ixl syntax and build (xmit or recv) descriptor
306 * blocks. To aid execution time evaluation of current location, enable
307 * status recording on each descriptor block built.
308 * On xmit, set sync & tag bits. On recv, optionally set wait for sync bit.
309 */
310 static void
311 hci1394_parse_ixl(hci1394_comp_ixl_vars_t *wvp, ixl1394_command_t *ixlp)
312 {
313 ixl1394_command_t *ixlnextp = ixlp; /* addr of next ixl cmd */
314 ixl1394_command_t *ixlcurp = NULL; /* addr of current ixl cmd */
315 uint16_t ixlopcode = 0; /* opcode of current ixl cmd */
316
317 uint32_t pktsize;
318 uint32_t pktcnt;
319
320 /* follow ixl links until reach end or find error */
321 while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
322
323 /* set this command as the current ixl command */
324 wvp->ixl_cur_cmdp = ixlcurp = ixlnextp;
325 ixlnextp = ixlcurp->next_ixlp;
326
327 ixlopcode = ixlcurp->ixl_opcode;
328
329 /* init compiler controlled values in current ixl command */
330 ixlcurp->compiler_privatep = NULL;
331 ixlcurp->compiler_resv = 0;
332
333 /* error if xmit/recv mode not appropriate for current cmd */
334 if ((((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) &&
335 ((ixlopcode & IXL1394_OPF_ONRECV) == 0)) ||
336 (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
337 ((ixlopcode & IXL1394_OPF_ONXMIT) == 0))) {
338
339 /* check if command op failed because it was invalid */
340 if (hci1394_is_opcode_valid(ixlopcode) != B_TRUE) {
341 wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
342 } else {
343 wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
344 }
345 continue;
346 }
347
348 /*
349 * if ends xfer flag set, finalize current xfer descriptor
350 * block build
351 */
352 if ((ixlopcode & IXL1394_OPF_ENDSXFER) != 0) {
353 /* finalize any descriptor block build in progress */
354 hci1394_finalize_cur_xfer_desc(wvp);
355
356 if (wvp->dma_bld_error != 0) {
357 continue;
358 }
359 }
360
361 /*
362 * now process based on specific opcode value
363 */
364 switch (ixlopcode) {
365
366 case IXL1394_OP_RECV_BUF:
367 case IXL1394_OP_RECV_BUF_U: {
368 ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
369
370 cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
371
372 /*
373 * In packet-per-buffer mode:
374 * This ixl command builds a collection of xfer
375 * descriptor blocks (size/pkt_size of them) each to
376 * recv a packet whose buffer size is pkt_size and
377 * whose buffer ptr is (pktcur*pkt_size + bufp)
378 *
379 * In buffer fill mode:
380 * This ixl command builds a single xfer descriptor
381 * block to recv as many packets or parts of packets
382 * as can fit into the buffer size specified
383 * (pkt_size is not used).
384 */
385
386 /* set xfer_state for new descriptor block build */
387 wvp->xfer_state = XFER_BUF;
388
389 /* set this ixl command as current xferstart command */
390 wvp->ixl_cur_xfer_stp = ixlcurp;
391
392 /*
393 * perform packet-per-buffer checks
394 * (no checks needed when in buffer fill mode)
395 */
396 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) == 0) {
397
398 /* the packets must use the buffer exactly */
399 pktsize = cur_xfer_buf_ixlp->pkt_size;
400 pktcnt = 0;
401 if (pktsize != 0) {
402 pktcnt = cur_xfer_buf_ixlp->size /
403 pktsize;
404 }
405 if ((pktcnt == 0) || ((pktsize * pktcnt) !=
406 cur_xfer_buf_ixlp->size)) {
407 wvp->dma_bld_error =
408 IXL1394_EPKTSIZE_RATIO;
409 continue;
410 }
411 }
412
413 /*
414 * set buffer pointer & size into first xfer_bufp
415 * and xfer_size
416 */
417 if (hci1394_set_next_xfer_buf(wvp,
418 cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
419 cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
420
421 /* wvp->dma_bld_error is set by above call */
422 continue;
423 }
424 break;
425 }
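		/*
		 * Worked example of the packet-per-buffer ratio check above
		 * (illustrative numbers): size = 4000 with pkt_size = 1000
		 * gives pktcnt = 4 and 4 * 1000 == 4000, so four packet
		 * descriptor blocks are built.  size = 4096 with
		 * pkt_size = 1000 leaves a remainder, so the compile fails
		 * with IXL1394_EPKTSIZE_RATIO.
		 */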
426
427 case IXL1394_OP_RECV_PKT_ST:
428 case IXL1394_OP_RECV_PKT_ST_U: {
429 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
430
431 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
432
433 /* error if in buffer fill mode */
434 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
435 wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
436 continue;
437 }
438
439 /* set xfer_state for new descriptor block build */
440 /* set this ixl command as current xferstart command */
441 wvp->xfer_state = XFER_PKT;
442 wvp->ixl_cur_xfer_stp = ixlcurp;
443
444 /*
445 * set buffer pointer & size into first xfer_bufp
446 * and xfer_size
447 */
448 if (hci1394_set_next_xfer_buf(wvp,
449 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
450 cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
451
452 /* wvp->dma_bld_error is set by above call */
453 continue;
454 }
455 break;
456 }
457
458 case IXL1394_OP_RECV_PKT:
459 case IXL1394_OP_RECV_PKT_U: {
460 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
461
462 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
463
464 /* error if in buffer fill mode */
465 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
466 wvp->dma_bld_error = IXL1394_EWRONG_XR_CMD_MODE;
467 continue;
468 }
469
470 /* error if xfer_state not xfer pkt */
471 if (wvp->xfer_state != XFER_PKT) {
472 wvp->dma_bld_error = IXL1394_EMISPLACED_RECV;
473 continue;
474 }
475
476 /*
477 * save xfer start cmd ixl ptr in compiler_privatep
478 * field of this cmd
479 */
480 ixlcurp->compiler_privatep = (void *)
481 wvp->ixl_cur_xfer_stp;
482
483 /*
484 * save pkt index [1-n] in compiler_resv field of
485 * this cmd
486 */
487 ixlcurp->compiler_resv = wvp->xfer_bufcnt;
488
489 /*
490 * set buffer pointer & size into next xfer_bufp
491 * and xfer_size
492 */
493 if (hci1394_set_next_xfer_buf(wvp,
494 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
495 cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
496
497 /* wvp->dma_bld_error is set by above call */
498 continue;
499 }
500
501 /*
502 * set updateable xfer cache flush eval flag if
503 * updateable opcode
504 */
505 if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
506 wvp->xfer_hci_flush |= UPDATEABLE_XFER;
507 }
508 break;
509 }
510
511 case IXL1394_OP_SEND_BUF:
512 case IXL1394_OP_SEND_BUF_U: {
513 ixl1394_xfer_buf_t *cur_xfer_buf_ixlp;
514
515 cur_xfer_buf_ixlp = (ixl1394_xfer_buf_t *)ixlcurp;
516
517 /*
518 * These send_buf commands build a collection of xmit
519 * descriptor blocks (size/pkt_size of them) each to
520 * xfer a packet whose buffer size is pkt_size and whose
521 * buffer ptr is (pktcur*pkt_size + bufp). (ptr and size
522 * are adjusted if they have header form of ixl cmd)
523 */
524
525 /* set xfer_state for new descriptor block build */
526 wvp->xfer_state = XFER_BUF;
527
528 /* set this ixl command as current xferstart command */
529 wvp->ixl_cur_xfer_stp = ixlcurp;
530
531 /* the packets must use the buffer exactly, else error */
532 pktsize = cur_xfer_buf_ixlp->pkt_size;
533 pktcnt = 0;
534 if (pktsize != 0) {
535 pktcnt = cur_xfer_buf_ixlp->size / pktsize;
536 }
537 if ((pktcnt == 0) || ((pktsize * pktcnt) !=
538 cur_xfer_buf_ixlp->size)) {
539 wvp->dma_bld_error = IXL1394_EPKTSIZE_RATIO;
540 continue;
541 }
542
543 /* set buf ptr & size into 1st xfer_bufp & xfer_size */
544 if (hci1394_set_next_xfer_buf(wvp,
545 cur_xfer_buf_ixlp->ixl_buf.ixldmac_addr,
546 cur_xfer_buf_ixlp->size) != DDI_SUCCESS) {
547
548 /* wvp->dma_bld_error is set by above call */
549 continue;
550 }
551 break;
552 }
553
554 case IXL1394_OP_SEND_PKT_ST:
555 case IXL1394_OP_SEND_PKT_ST_U: {
556 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
557
558 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
559
560 /* set xfer_state for new descriptor block build */
561 /* set this ixl command as current xferstart command */
562 wvp->xfer_state = XFER_PKT;
563 wvp->ixl_cur_xfer_stp = ixlcurp;
564
565 /*
566 * set buffer pointer & size into first xfer_bufp and
567 * xfer_size
568 */
569 if (hci1394_set_next_xfer_buf(wvp,
570 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
571 cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
572
573 /* wvp->dma_bld_error is set by above call */
574 continue;
575 }
576 break;
577 }
578
579 case IXL1394_OP_SEND_PKT_WHDR_ST:
580 case IXL1394_OP_SEND_PKT_WHDR_ST_U: {
581 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
582
583 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
584
585 /* set xfer_state for new descriptor block build */
586 /* set this ixl command as current xferstart command */
587 wvp->xfer_state = XFER_PKT;
588 wvp->ixl_cur_xfer_stp = ixlcurp;
589
590 /*
591 * buffer size must be at least 4 (must include header),
592 * else error
593 */
594 if (cur_xfer_pkt_ixlp->size < 4) {
595 wvp->dma_bld_error = IXL1394_EPKT_HDR_MISSING;
596 continue;
597 }
598
599 /*
600 * set buffer and size (excluding header) into first
601 * xfer_bufp and xfer_size
602 */
603 if (hci1394_set_next_xfer_buf(wvp,
604 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr + 4,
605 cur_xfer_pkt_ixlp->size - 4) != DDI_SUCCESS) {
606
607 /* wvp->dma_bld_error is set by above call */
608 continue;
609 }
610 break;
611 }
612
613 case IXL1394_OP_SEND_PKT:
614 case IXL1394_OP_SEND_PKT_U: {
615 ixl1394_xfer_pkt_t *cur_xfer_pkt_ixlp;
616
617 cur_xfer_pkt_ixlp = (ixl1394_xfer_pkt_t *)ixlcurp;
618
619 /* error if xfer_state not xfer pkt */
620 if (wvp->xfer_state != XFER_PKT) {
621 wvp->dma_bld_error = IXL1394_EMISPLACED_SEND;
622 continue;
623 }
624
625 /*
626 * save xfer start cmd ixl ptr in compiler_privatep
627 * field of this cmd
628 */
629 ixlcurp->compiler_privatep = (void *)
630 wvp->ixl_cur_xfer_stp;
631
632 /*
633 * save pkt index [1-n] in compiler_resv field of this
634 * cmd
635 */
636 ixlcurp->compiler_resv = wvp->xfer_bufcnt;
637
638 /*
639 * set buffer pointer & size into next xfer_bufp
640 * and xfer_size
641 */
642 if (hci1394_set_next_xfer_buf(wvp,
643 cur_xfer_pkt_ixlp->ixl_buf.ixldmac_addr,
644 cur_xfer_pkt_ixlp->size) != DDI_SUCCESS) {
645
646 /* wvp->dma_bld_error is set by above call */
647 continue;
648 }
649
650 /*
651 * set updateable xfer cache flush eval flag if
652 * updateable opcode
653 */
654 if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
655 wvp->xfer_hci_flush |= UPDATEABLE_XFER;
656 }
657 break;
658 }
659
660 case IXL1394_OP_SEND_HDR_ONLY:
661 /* set xfer_state for new descriptor block build */
662 wvp->xfer_state = XMIT_HDRONLY;
663
664 /* set this ixl command as current xferstart command */
665 wvp->ixl_cur_xfer_stp = ixlcurp;
666 break;
667
668 case IXL1394_OP_SEND_NO_PKT:
669 /* set xfer_state for new descriptor block build */
670 wvp->xfer_state = XMIT_NOPKT;
671
672 /* set this ixl command as current xferstart command */
673 wvp->ixl_cur_xfer_stp = ixlcurp;
674 break;
675
676 case IXL1394_OP_JUMP:
677 case IXL1394_OP_JUMP_U: {
678 ixl1394_jump_t *cur_jump_ixlp;
679
680 cur_jump_ixlp = (ixl1394_jump_t *)ixlcurp;
681
682 /*
683 * verify label indicated by IXL1394_OP_JUMP is
684 * actually an IXL1394_OP_LABEL or NULL
685 */
686 if ((cur_jump_ixlp->label != NULL) &&
687 (cur_jump_ixlp->label->ixl_opcode !=
688 IXL1394_OP_LABEL)) {
689 wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
690 continue;
691 }
692 break;
693 }
694
695 case IXL1394_OP_LABEL:
696 /*
697 * save current ixl label command for xfer cmd
698 * finalize processing
699 */
700 wvp->ixl_cur_labelp = ixlcurp;
701
702 /* set initiating label flag to cause cache flush */
703 wvp->xfer_hci_flush |= INITIATING_LBL;
704 break;
705
706 case IXL1394_OP_CALLBACK:
707 case IXL1394_OP_CALLBACK_U:
708 case IXL1394_OP_STORE_TIMESTAMP:
709 /*
710 * these commands are accepted during compile,
711 * processed during execution (interrupt handling).
712 * No further processing is needed here.
713 */
714 break;
715
716 case IXL1394_OP_SET_SKIPMODE:
717 case IXL1394_OP_SET_SKIPMODE_U:
718 /*
719 * Error if already have a set skipmode cmd for
720 * this xfer
721 */
722 if (wvp->ixl_setskipmode_cmdp != NULL) {
723 wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
724 continue;
725 }
726
727 /* save skip mode ixl command and verify skipmode */
728 wvp->ixl_setskipmode_cmdp = (ixl1394_set_skipmode_t *)
729 ixlcurp;
730
731 if ((wvp->ixl_setskipmode_cmdp->skipmode !=
732 IXL1394_SKIP_TO_NEXT) &&
733 (wvp->ixl_setskipmode_cmdp->skipmode !=
734 IXL1394_SKIP_TO_SELF) &&
735 (wvp->ixl_setskipmode_cmdp->skipmode !=
736 IXL1394_SKIP_TO_STOP) &&
737 (wvp->ixl_setskipmode_cmdp->skipmode !=
738 IXL1394_SKIP_TO_LABEL)) {
739 wvp->dma_bld_error = IXL1394_EBAD_SKIPMODE;
740 continue;
741 }
742
743 /*
744 * if mode is IXL1394_SKIP_TO_LABEL, verify label
745 * references an IXL1394_OP_LABEL
746 */
747 if ((wvp->ixl_setskipmode_cmdp->skipmode ==
748 IXL1394_SKIP_TO_LABEL) &&
749 ((wvp->ixl_setskipmode_cmdp->label == NULL) ||
750 (wvp->ixl_setskipmode_cmdp->label->ixl_opcode !=
751 IXL1394_OP_LABEL))) {
752 wvp->dma_bld_error = IXL1394_EJUMP_NOT_TO_LABEL;
753 continue;
754 }
755 /*
756 * set updateable set cmd cache flush eval flag if
757 * updateable opcode
758 */
759 if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
760 wvp->xfer_hci_flush |= UPDATEABLE_SET;
761 }
762 break;
763
764 case IXL1394_OP_SET_TAGSYNC:
765 case IXL1394_OP_SET_TAGSYNC_U:
766 /*
767 * it is an error if we already have a set tag and sync cmd
768 * for this xfer
769 */
770 if (wvp->ixl_settagsync_cmdp != NULL) {
771 wvp->dma_bld_error = IXL1394_EDUPLICATE_SET_CMD;
772 continue;
773 }
774
775 /* save ixl command containing tag and sync values */
776 wvp->ixl_settagsync_cmdp =
777 (ixl1394_set_tagsync_t *)ixlcurp;
778
779 /*
780 * set updateable set cmd cache flush eval flag if
781 * updateable opcode
782 */
783 if ((ixlopcode & IXL1394_OPF_UPDATE) != 0) {
784 wvp->xfer_hci_flush |= UPDATEABLE_SET;
785 }
786 break;
787
788 case IXL1394_OP_SET_SYNCWAIT:
789 /*
790 * count ixl wait-for-sync commands since the last
791 * finalize; ignore multiple occurrences for the same
792 * xfer command
793 */
794 wvp->ixl_setsyncwait_cnt++;
795 break;
796
797 default:
798 /* error - unknown/unimplemented ixl command */
799 wvp->dma_bld_error = IXL1394_EBAD_IXL_OPCODE;
800 continue;
801 }
802 } /* while */
803
804 /* finalize any last descriptor block build */
805 wvp->ixl_cur_cmdp = NULL;
806 if (wvp->dma_bld_error == 0) {
807 hci1394_finalize_cur_xfer_desc(wvp);
808 }
809 }
810
811 /*
812 * hci1394_finalize_all_xfer_desc()
813 * Pass 2: Scan IXL resolving all dma descriptor jump and skip addresses.
814 *
815 * Set interrupt enable on first descriptor block associated with current
816 * xfer IXL command if the current IXL xfer was introduced by an IXL label cmd.
817 *
818 * Set interrupt enable on last descriptor block associated with current xfer
819 * IXL command if any callback ixl commands are found on the execution path
820 * between the current and the next xfer ixl command. (Previously, this
821 * applied to store timestamp ixl commands, as well.)
822 */
823 static void
824 hci1394_finalize_all_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
825 {
826 ixl1394_command_t *ixlcurp; /* current ixl command */
827 ixl1394_command_t *ixlnextp; /* next ixl command */
828 ixl1394_command_t *ixlexecnext;
829 hci1394_xfer_ctl_t *xferctl_curp;
830 hci1394_xfer_ctl_t *xferctl_nxtp;
831 hci1394_desc_t *hcidescp;
832 ddi_acc_handle_t acc_hdl;
833 uint32_t temp;
834 uint32_t dma_execnext_addr;
835 uint32_t dma_skiplabel_addr;
836 uint32_t dma_skip_addr;
837 uint32_t callback_cnt;
838 uint16_t repcnt;
839 uint16_t ixlopcode;
840 int ii;
841 int err;
842
843 /*
844 * If in xmit mode and the default skipmode is skip to label,
845 * follow the exec path starting at the default skipmode label until
846 * the first ixl xfer command to be executed is found.
847 * Set its address into default_skipxferp.
848 */
849 if (((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) == 0) &&
850 (wvp->ctxtp->default_skipmode == IXL1394_SKIP_TO_LABEL)) {
851
852 err = hci1394_ixl_find_next_exec_xfer(wvp->default_skiplabelp,
853 NULL, &wvp->default_skipxferp);
854 if (err == DDI_FAILURE) {
855 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
856 return;
857 }
858 }
859
860 /* set first ixl cmd */
861 ixlnextp = wvp->ctxtp->ixl_firstp;
862
863 /* follow ixl links until reach end or find error */
864 while ((ixlnextp != NULL) && (wvp->dma_bld_error == 0)) {
865
866 /* set this command as the current ixl command */
867 ixlcurp = ixlnextp;
868 ixlnextp = ixlcurp->next_ixlp;
869
870 /* get command opcode removing unneeded update flag */
871 ixlopcode = ixlcurp->ixl_opcode & ~IXL1394_OPF_UPDATE;
872
873 /*
874 * Scan for next ixl xfer start command (including this one),
875 * along ixl link path. Once xfer command found, find next IXL
876 * xfer cmd along execution path and fill in branch address of
877 * current xfer command. If it is a composite ixl xfer command, first
878 * link forward branch dma addresses of each descriptor block in
879 * composite, until reach final one then set its branch address
880 * to next execution path xfer found. Next determine skip mode
881 * and fill in skip address(es) appropriately.
882 */
883 /* skip to next if not xfer start ixl command */
884 if (((ixlopcode & IXL1394_OPF_ISXFER) == 0) ||
885 ((ixlopcode & IXL1394_OPTY_MASK) == 0)) {
886 continue;
887 }
888
889 /*
890 * get xfer_ctl structure and composite repeat count for current
891 * IXL xfer cmd
892 */
893 xferctl_curp = (hci1394_xfer_ctl_t *)ixlcurp->compiler_privatep;
894 repcnt = xferctl_curp->cnt;
895
896 /*
897 * if initiated by an IXL label command, set interrupt enable
898 * flag into last component of first descriptor block of
899 * current IXL xfer cmd
900 */
901 if ((xferctl_curp->ctl_flags & XCTL_LABELLED) != 0) {
902 hcidescp = (hci1394_desc_t *)
903 xferctl_curp->dma[0].dma_descp;
904 acc_hdl = xferctl_curp->dma[0].dma_buf->bi_handle;
905 temp = ddi_get32(acc_hdl, &hcidescp->hdr);
906 temp |= DESC_INTR_ENBL;
907 ddi_put32(acc_hdl, &hcidescp->hdr, temp);
908 }
909
910 /* find next xfer IXL cmd by following execution path */
911 err = hci1394_ixl_find_next_exec_xfer(ixlcurp->next_ixlp,
912 &callback_cnt, &ixlexecnext);
913
914 /* if label<->jump loop detected, return error */
915 if (err == DDI_FAILURE) {
916 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
917 continue;
918 }
919
920 /* link current IXL's xfer_ctl to next xfer IXL on exec path */
921 xferctl_curp->execp = ixlexecnext;
922
923 /*
924 * if callbacks have been seen during execution path scan,
925 * set interrupt enable flag into last descriptor of last
926 * descriptor block of current IXL xfer cmd
927 */
928 if (callback_cnt != 0) {
929 hcidescp = (hci1394_desc_t *)
930 xferctl_curp->dma[repcnt - 1].dma_descp;
931 acc_hdl =
932 xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
933 temp = ddi_get32(acc_hdl, &hcidescp->hdr);
934 temp |= DESC_INTR_ENBL;
935 ddi_put32(acc_hdl, &hcidescp->hdr, temp);
936 }
937
938 /*
939 * obtain dma bound addr of next exec path IXL xfer command,
940 * if any
941 */
942 dma_execnext_addr = 0;
943
944 if (ixlexecnext != NULL) {
945 xferctl_nxtp = (hci1394_xfer_ctl_t *)
946 ixlexecnext->compiler_privatep;
947 dma_execnext_addr = xferctl_nxtp->dma[0].dma_bound;
948 } else {
949 /*
950 * If this is last descriptor (next == NULL), then
951 * make sure the interrupt bit is enabled. This
952 * way we can ensure that we are notified when the
953 * descriptor chain processing has come to an end.
954 */
955 hcidescp = (hci1394_desc_t *)
956 xferctl_curp->dma[repcnt - 1].dma_descp;
957 acc_hdl =
958 xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
959 temp = ddi_get32(acc_hdl, &hcidescp->hdr);
960 temp |= DESC_INTR_ENBL;
961 ddi_put32(acc_hdl, &hcidescp->hdr, temp);
962 }
963
964 /*
965 * set jump address of final cur IXL xfer cmd to addr next
966 * IXL xfer cmd
967 */
968 hcidescp = (hci1394_desc_t *)
969 xferctl_curp->dma[repcnt - 1].dma_descp;
970 acc_hdl = xferctl_curp->dma[repcnt - 1].dma_buf->bi_handle;
971 ddi_put32(acc_hdl, &hcidescp->branch, dma_execnext_addr);
972
973 /*
974 * if a composite object, forward link initial jump
975 * dma addresses
976 */
977 for (ii = 0; ii < repcnt - 1; ii++) {
978 hcidescp = (hci1394_desc_t *)
979 xferctl_curp->dma[ii].dma_descp;
980 acc_hdl = xferctl_curp->dma[ii].dma_buf->bi_handle;
981 ddi_put32(acc_hdl, &hcidescp->branch,
982 xferctl_curp->dma[ii + 1].dma_bound);
983 }
984
985 /*
986 * fill in skip address(es) for all descriptor blocks belonging
987 * to current IXL xfer command; note: skip addresses apply only
988 * to xmit mode commands
989 */
990 if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {
991
992 /* first obtain and set skip mode information */
993 wvp->ixl_setskipmode_cmdp = xferctl_curp->skipmodep;
994 hci1394_set_xmit_skip_mode(wvp);
995
996 /*
997 * if skip to label, init dma bound addr to be
998 * 1st xfer cmd after label
999 */
1000 dma_skiplabel_addr = 0;
1001 if ((wvp->skipmode == IXL1394_SKIP_TO_LABEL) &&
1002 (wvp->skipxferp != NULL)) {
1003 xferctl_nxtp = (hci1394_xfer_ctl_t *)
1004 wvp->skipxferp->compiler_privatep;
1005 dma_skiplabel_addr =
1006 xferctl_nxtp->dma[0].dma_bound;
1007 }
1008
1009 /*
1010 * set skip addrs for each descriptor blk at this
1011 * xfer start IXL cmd
1012 */
1013 for (ii = 0; ii < repcnt; ii++) {
1014 switch (wvp->skipmode) {
1015
1016 case IXL1394_SKIP_TO_LABEL:
1017 /* set dma bound address - label */
1018 dma_skip_addr = dma_skiplabel_addr;
1019 break;
1020
1021 case IXL1394_SKIP_TO_NEXT:
1022 /* set dma bound address - next */
1023 if (ii < repcnt - 1) {
1024 dma_skip_addr = xferctl_curp->
1025 dma[ii + 1].dma_bound;
1026 } else {
1027 dma_skip_addr =
1028 dma_execnext_addr;
1029 }
1030 break;
1031
1032 case IXL1394_SKIP_TO_SELF:
1033 /* set dma bound address - self */
1034 dma_skip_addr =
1035 xferctl_curp->dma[ii].dma_bound;
1036 break;
1037
1038 case IXL1394_SKIP_TO_STOP:
1039 default:
1040 /* set dma bound address - stop */
1041 dma_skip_addr = 0;
1042 break;
1043 }
1044
1045 /*
1046 * determine address of first descriptor of
1047 * current descriptor block by adjusting addr of
1048 * last descriptor of current descriptor block
1049 */
1050 hcidescp = ((hci1394_desc_t *)
1051 xferctl_curp->dma[ii].dma_descp);
1052 acc_hdl =
1053 xferctl_curp->dma[ii].dma_buf->bi_handle;
1054
1055 /*
1056 * adjust by count of descriptors in this desc
1057 * block not including the last one (size of
1058 * descriptor)
1059 */
1060 hcidescp -= ((xferctl_curp->dma[ii].dma_bound &
1061 DESC_Z_MASK) - 1);
1062
1063 /*
1064 * adjust further if the last descriptor is
1065 * double sized
1066 */
1067 if (ixlopcode == IXL1394_OP_SEND_HDR_ONLY) {
1068 hcidescp++;
1069 }
1070 /*
1071 * now set skip address into first descriptor
1072 * of descriptor block
1073 */
1074 ddi_put32(acc_hdl, &hcidescp->branch,
1075 dma_skip_addr);
1076 } /* for */
1077 } /* if */
1078 } /* while */
1079 }
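/*
 * Example of the skip-address pointer arithmetic above (illustrative
 * values): if dma[ii].dma_bound is 0x0ABC4003, the DESC_Z_MASK bits give
 * Z = 3 descriptors in the block, so the first descriptor sits two
 * hci1394_desc_t entries before the stored last-descriptor address in
 * dma_descp; for IXL1394_OP_SEND_HDR_ONLY the last descriptor is double
 * sized, hence the additional hcidescp++ adjustment.
 */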
1080
1081 /*
1082 * hci1394_finalize_cur_xfer_desc()
1083 * Build the openHCI descriptor for a packet or buffer based on info
1084 * currently collected into the working vars struct (wvp). After some
1085 * checks, this routine dispatches to the appropriate descriptor block
1086 * build (bld) routine for the packet or buf type.
1087 */
1088 static void
1089 hci1394_finalize_cur_xfer_desc(hci1394_comp_ixl_vars_t *wvp)
1090 {
1091 uint16_t ixlopcode;
1092 uint16_t ixlopraw;
1093
1094 /* extract opcode from current IXL cmd (if any) */
1095 if (wvp->ixl_cur_cmdp != NULL) {
1096 ixlopcode = wvp->ixl_cur_cmdp->ixl_opcode;
1097 ixlopraw = ixlopcode & ~IXL1394_OPF_UPDATE;
1098 } else {
1099 ixlopcode = ixlopraw = IXL1394_OP_INVALID;
1100 }
1101
1102 /*
1103 * if no xfer descriptor block being built, perform validity checks
1104 */
1105 if (wvp->xfer_state == XFER_NONE) {
1106 /*
1107 * error if being finalized by IXL1394_OP_LABEL or
1108 * IXL1394_OP_JUMP or if at end, and have an unapplied
1109 * IXL1394_OP_SET_TAGSYNC, IXL1394_OP_SET_SKIPMODE or
1110 * IXL1394_OP_SET_SYNCWAIT
1111 */
1112 if ((ixlopraw == IXL1394_OP_JUMP) ||
1113 (ixlopraw == IXL1394_OP_LABEL) ||
1114 (wvp->ixl_cur_cmdp == NULL) ||
1115 (wvp->ixl_cur_cmdp->next_ixlp == NULL)) {
1116 if ((wvp->ixl_settagsync_cmdp != NULL) ||
1117 (wvp->ixl_setskipmode_cmdp != NULL) ||
1118 (wvp->ixl_setsyncwait_cnt != 0)) {
1119 wvp->dma_bld_error = IXL1394_EUNAPPLIED_SET_CMD;
1120 return;
1121 }
1122 }
1123
1124 /* error if finalize is due to updateable jump cmd */
1125 if (ixlopcode == IXL1394_OP_JUMP_U) {
1126 wvp->dma_bld_error = IXL1394_EUPDATE_DISALLOWED;
1127 return;
1128 }
1129
1130 /* no error, no xfer */
1131 return;
1132 }
1133
1134 /*
1135 * finalize current xfer descriptor block being built
1136 */
1137
1138 /* count IXL xfer start command for descriptor block being built */
1139 wvp->ixl_xfer_st_cnt++;
1140
1141 /*
1142 * complete setting of cache flush evaluation flags; flags will already
1143 * have been set by updateable set cmds and non-start xfer pkt cmds
1144 */
1145 /* now set cache flush flag if current xfer start cmnd is updateable */
1146 if ((wvp->ixl_cur_xfer_stp->ixl_opcode & IXL1394_OPF_UPDATE) != 0) {
1147 wvp->xfer_hci_flush |= UPDATEABLE_XFER;
1148 }
1149 /*
1150 * also set cache flush flag if xfer being finalized by
1151 * updateable jump cmd
1152 */
1153 if (ixlopcode == IXL1394_OP_JUMP_U) {
1154 wvp->xfer_hci_flush |= UPDATEABLE_JUMP;
1155 }
1156
1157 /*
1158 * Determine if cache flush required before building next descriptor
1159 * block. If xfer pkt command and any cache flush flags are set,
1160 * hci flush needed.
1161 * If buffer or special xfer command and xfer command is updateable or
1162 * an associated set command is updateable, hci flush is required now.
1163 * If a single-xfer buffer or special xfer command is finalized by
1164 * updateable jump command, hci flush is required now.
1165 * Note: a cache flush will be required later, before the last
1166 * descriptor block of a multi-xfer set of descriptor blocks is built,
1167 * if this (non-pkt) xfer is finalized by an updateable jump command.
1168 */
1169 if (wvp->xfer_hci_flush != 0) {
1170 if (((wvp->ixl_cur_xfer_stp->ixl_opcode &
1171 IXL1394_OPTY_XFER_PKT_ST) != 0) || ((wvp->xfer_hci_flush &
1172 (UPDATEABLE_XFER | UPDATEABLE_SET | INITIATING_LBL)) !=
1173 0)) {
1174 if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
1175 /* wvp->dma_bld_error is set by above call */
1176 return;
1177 }
1178 }
1179 }
1180
1181 /*
1182 * determine which kind of descriptor block to build based on
1183 * xfer state - hdr only, skip cycle, pkt or buf.
1184 */
1185 switch (wvp->xfer_state) {
1186
1187 case XFER_PKT:
1188 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1189 hci1394_bld_recv_pkt_desc(wvp);
1190 } else {
1191 hci1394_bld_xmit_pkt_desc(wvp);
1192 }
1193 break;
1194
1195 case XFER_BUF:
1196 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_RECV) != 0) {
1197 if ((wvp->ixl_io_mode & HCI1394_ISO_CTXT_BFFILL) != 0) {
1198 hci1394_bld_recv_buf_fill_desc(wvp);
1199 } else {
1200 hci1394_bld_recv_buf_ppb_desc(wvp);
1201 }
1202 } else {
1203 hci1394_bld_xmit_buf_desc(wvp);
1204 }
1205 break;
1206
1207 case XMIT_HDRONLY:
1208 case XMIT_NOPKT:
1209 hci1394_bld_xmit_hdronly_nopkt_desc(wvp);
1210 break;
1211
1212 default:
1213 /* internal compiler error */
1214 wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
1215 }
1216
1217 /* return if error */
1218 if (wvp->dma_bld_error != 0) {
1219 /* wvp->dma_bld_error is set by above call */
1220 return;
1221 }
1222
1223 /*
1224 * if was finalizing IXL jump cmd, set compiler_privatep to
1225 * cur xfer IXL cmd
1226 */
1227 if (ixlopraw == IXL1394_OP_JUMP) {
1228 wvp->ixl_cur_cmdp->compiler_privatep =
1229 (void *)wvp->ixl_cur_xfer_stp;
1230 }
1231
1232 /* if cur xfer IXL initiated by IXL label cmd, set flag in xfer_ctl */
1233 if (wvp->ixl_cur_labelp != NULL) {
1234 ((hci1394_xfer_ctl_t *)
1235 (wvp->ixl_cur_xfer_stp->compiler_privatep))->ctl_flags |=
1236 XCTL_LABELLED;
1237 wvp->ixl_cur_labelp = NULL;
1238 }
1239
1240 /*
1241 * set any associated IXL set skipmode cmd into xfer_ctl of
1242 * cur xfer IXL cmd
1243 */
1244 if (wvp->ixl_setskipmode_cmdp != NULL) {
1245 ((hci1394_xfer_ctl_t *)
1246 (wvp->ixl_cur_xfer_stp->compiler_privatep))->skipmodep =
1247 wvp->ixl_setskipmode_cmdp;
1248 }
1249
1250 /* set no current xfer start cmd */
1251 wvp->ixl_cur_xfer_stp = NULL;
1252
1253 /* set no current set tag&sync, set skipmode or set syncwait commands */
1254 wvp->ixl_settagsync_cmdp = NULL;
1255 wvp->ixl_setskipmode_cmdp = NULL;
1256 wvp->ixl_setsyncwait_cnt = 0;
1257
1258 /* set no currently active descriptor blocks */
1259 wvp->descriptors = 0;
1260
1261 /* reset total packet length and buffers count */
1262 wvp->xfer_pktlen = 0;
1263 wvp->xfer_bufcnt = 0;
1264
1265 /* reset flush cache evaluation flags */
1266 wvp->xfer_hci_flush = 0;
1267
1268 /* set no xmit descriptor block being built */
1269 wvp->xfer_state = XFER_NONE;
1270 }
1271
1272 /*
1273 * hci1394_bld_recv_pkt_desc()
1274 * Used to create the openHCI dma descriptor block(s) for a receive packet.
1275 */
1276 static void
1277 hci1394_bld_recv_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1278 {
1279 hci1394_xfer_ctl_t *xctlp;
1280 caddr_t dma_descp;
1281 uint32_t dma_desc_bound;
1282 uint32_t wait_for_sync;
1283 uint32_t ii;
1284 hci1394_desc_t *wv_descp; /* shorthand to local descrpt */
1285
1286 /*
1287 * it is an error if the number of descriptors to be built exceeds the
1288 * maximum number of descriptors allowed in a descriptor block.
1289 */
1290 if ((wvp->descriptors + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1291 wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1292 return;
1293 }
1294
1295 /* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1296 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1297 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1298 return;
1299 }
1300
1301 /*
1302 * save xfer_ctl struct addr in compiler_privatep of
1303 * current IXL xfer cmd
1304 */
1305 wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1306
1307 /*
1308 * if enabled, set wait for sync flag in first descriptor of
1309 * descriptor block
1310 */
1311 if (wvp->ixl_setsyncwait_cnt > 0) {
1312 wvp->ixl_setsyncwait_cnt = 1;
1313 wait_for_sync = DESC_W_ENBL;
1314 } else {
1315 wait_for_sync = DESC_W_DSABL;
1316 }
1317
1318 /* create descriptor block for this recv packet (xfer status enabled) */
1319 for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1320 wv_descp = &wvp->descriptor_block[wvp->descriptors];
1321
1322 if (ii == (wvp->xfer_bufcnt - 1)) {
1323 HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL,
1324 DESC_INTR_DSABL, wait_for_sync, wvp->xfer_size[ii]);
1325 } else {
1326 HCI1394_INIT_IR_PPB_IMORE(wv_descp, wait_for_sync,
1327 wvp->xfer_size[ii]);
1328 }
1329 wv_descp->data_addr = wvp->xfer_bufp[ii];
1330 wv_descp->branch = 0;
1331 wv_descp->status = (wvp->xfer_size[ii] <<
1332 DESC_ST_RESCOUNT_SHIFT) & DESC_ST_RESCOUNT_MASK;
1333 wvp->descriptors++;
1334 }
1335
1336 /* allocate and copy descriptor block to dma memory */
1337 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1338 DDI_SUCCESS) {
1339 /* wvp->dma_bld_error is set by above function call */
1340 return;
1341 }
1342
1343 /*
1344 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1345 * is last component)
1346 */
1347 xctlp->dma[0].dma_bound = dma_desc_bound;
1348 xctlp->dma[0].dma_descp =
1349 dma_descp + (wvp->xfer_bufcnt - 1) * sizeof (hci1394_desc_t);
1350 xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
1351 }
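/*
 * Layout example for the addresses stored above: a receive packet built
 * from three IXL buffer fragments occupies three hci1394_desc_t entries,
 * so dma[0].dma_descp is set to dma_descp + 2 * sizeof (hci1394_desc_t),
 * the last descriptor of the block, which is where the later branch/skip
 * and interrupt-enable fixups are applied.
 */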
1352
1353 /*
1354 * hci1394_bld_recv_buf_ppb_desc()
1355 * Used to create the openHCI dma descriptor block(s) for a receive buf
1356 * in packet per buffer mode.
1357 */
1358 static void
1359 hci1394_bld_recv_buf_ppb_desc(hci1394_comp_ixl_vars_t *wvp)
1360 {
1361 hci1394_xfer_ctl_t *xctlp;
1362 ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
1363 caddr_t dma_descp;
1364 uint32_t dma_desc_bound;
1365 uint32_t pktsize;
1366 uint32_t pktcnt;
1367 uint32_t wait_for_sync;
1368 uint32_t ii;
1369 hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
1370
1371 local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1372
1373 /* determine number and size of pkt desc blocks to create */
1374 pktsize = local_ixl_cur_xfer_stp->pkt_size;
1375 pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1376
1377 /* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1378 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1379 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1380 return;
1381 }
1382
1383 /*
1384 * save xfer_ctl struct addr in compiler_privatep of
1385 * current IXL xfer cmd
1386 */
1387 local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1388
1389 /*
1390 * if enabled, set wait for sync flag in first descriptor in
1391 * descriptor block
1392 */
1393 if (wvp->ixl_setsyncwait_cnt > 0) {
1394 wvp->ixl_setsyncwait_cnt = 1;
1395 wait_for_sync = DESC_W_ENBL;
1396 } else {
1397 wait_for_sync = DESC_W_DSABL;
1398 }
1399
1400 /* create first descriptor block for this recv packet */
1401 /* consists of one descriptor and xfer status is enabled */
1402 wv_descp = &wvp->descriptor_block[wvp->descriptors];
1403 HCI1394_INIT_IR_PPB_ILAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
1404 wait_for_sync, pktsize);
1405 wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1406 wv_descp->branch = 0;
1407 wv_descp->status = (pktsize << DESC_ST_RESCOUNT_SHIFT) &
1408 DESC_ST_RESCOUNT_MASK;
1409 wvp->descriptors++;
1410
1411 /*
1412 * generate as many contiguous descriptor blocks as there are
1413 * recv pkts
1414 */
1415 for (ii = 0; ii < pktcnt; ii++) {
1416
1417 /* if about to create last descriptor block */
1418 if (ii == (pktcnt - 1)) {
1419 /* check and perform any required hci cache flush */
1420 if (hci1394_flush_end_desc_check(wvp, ii) !=
1421 DDI_SUCCESS) {
1422 /* wvp->dma_bld_error is set by above call */
1423 return;
1424 }
1425 }
1426
1427 /* allocate and copy descriptor block to dma memory */
1428 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1429 &dma_desc_bound) != DDI_SUCCESS) {
1430 /* wvp->dma_bld_error is set by above call */
1431 return;
1432 }
1433
1434 /*
1435 * set dma addrs into xfer_ctl struct (unbound addr (kernel
1436 * virtual) is last component (descriptor))
1437 */
1438 xctlp->dma[ii].dma_bound = dma_desc_bound;
1439 xctlp->dma[ii].dma_descp = dma_descp;
1440 xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
1441
1442 /* advance buffer ptr by pktsize in descriptor block */
1443 wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
1444 pktsize;
1445 }
1446 }
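/*
 * Example: an IXL1394_OP_RECV_BUF with size = 4000 and pkt_size = 1000
 * (illustrative values) causes the single-descriptor template above to
 * be copied into dma memory four times, with data_addr advanced by 1000
 * bytes after each copy, producing xctlp->dma[0] through dma[3].
 */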
1447
1448 /*
1449 * hci1394_bld_recv_buf_fill_desc()
1450 * Used to create the openHCI dma descriptor block(s) for a receive buf
1451 * in buffer fill mode.
1452 */
1453 static void
1454 hci1394_bld_recv_buf_fill_desc(hci1394_comp_ixl_vars_t *wvp)
1455 {
1456 hci1394_xfer_ctl_t *xctlp;
1457 caddr_t dma_descp;
1458 uint32_t dma_desc_bound;
1459 uint32_t wait_for_sync;
1460 ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
1461
1462 local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1463
1464
1465 /* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1466 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1467 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1468 return;
1469 }
1470
1471 /*
1472 * save xfer_ctl struct addr in compiler_privatep of
1473 * current IXL xfer cmd
1474 */
1475 local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1476
1477 /*
1478 * if enabled, set wait for sync flag in first descriptor of
1479 * descriptor block
1480 */
1481 if (wvp->ixl_setsyncwait_cnt > 0) {
1482 wvp->ixl_setsyncwait_cnt = 1;
1483 wait_for_sync = DESC_W_ENBL;
1484 } else {
1485 wait_for_sync = DESC_W_DSABL;
1486 }
1487
1488 /*
1489 * create descriptor block for this buffer fill mode recv command which
1490 * consists of one descriptor with xfer status enabled
1491 */
1492 HCI1394_INIT_IR_BF_IMORE(&wvp->descriptor_block[wvp->descriptors],
1493 DESC_INTR_DSABL, wait_for_sync, local_ixl_cur_xfer_stp->size);
1494
1495 wvp->descriptor_block[wvp->descriptors].data_addr =
1496 local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1497 wvp->descriptor_block[wvp->descriptors].branch = 0;
1498 wvp->descriptor_block[wvp->descriptors].status =
1499 (local_ixl_cur_xfer_stp->size << DESC_ST_RESCOUNT_SHIFT) &
1500 DESC_ST_RESCOUNT_MASK;
1501 wvp->descriptors++;
1502
1503 /* check and perform any required hci cache flush */
1504 if (hci1394_flush_end_desc_check(wvp, 0) != DDI_SUCCESS) {
1505 /* wvp->dma_bld_error is set by above call */
1506 return;
1507 }
1508
1509 /* allocate and copy descriptor block to dma memory */
1510 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound)
1511 != DDI_SUCCESS) {
1512 /* wvp->dma_bld_error is set by above call */
1513 return;
1514 }
1515
1516 /*
1517 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1518 * is last component.
1519 */
1520 xctlp->dma[0].dma_bound = dma_desc_bound;
1521 xctlp->dma[0].dma_descp = dma_descp;
1522 xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
1523 }
1524
1525 /*
1526 * hci1394_bld_xmit_pkt_desc()
1527 * Used to create the openHCI dma descriptor block(s) for a transmit packet.
1528 */
1529 static void
1530 hci1394_bld_xmit_pkt_desc(hci1394_comp_ixl_vars_t *wvp)
1531 {
1532 hci1394_xfer_ctl_t *xctlp;
1533 hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1534 hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
1535 caddr_t dma_descp; /* dma bound memory for descriptor */
1536 uint32_t dma_desc_bound;
1537 uint32_t ii;
1538
1539 /*
1540 * it is an error if the number of descriptors to be built exceeds the
1541 * maximum allowed in a descriptor block. Add 2 for the overhead
1542 * of the OMORE-Immediate.
1543 */
1544 if ((wvp->descriptors + 2 + wvp->xfer_bufcnt) > HCI1394_DESC_MAX_Z) {
1545 wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
1546 return;
1547 }
1548
1549 /* it is an error if the total packet length exceeds 0xFFFF */
1550 if (wvp->xfer_pktlen > 0xFFFF) {
1551 wvp->dma_bld_error = IXL1394_EPKTSIZE_MAX_OFLO;
1552 return;
1553 }
1554
1555 /* allocate an xfer_ctl struct, including 1 xfer_ctl_dma struct */
1556 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, 1)) == NULL) {
1557 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1558 return;
1559 }
1560
1561 /*
1562 * save xfer_ctl struct addr in compiler_privatep of
1563 * current IXL xfer cmd
1564 */
1565 wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1566
1567 /* generate values for the xmit pkt hdrs */
1568 hci1394_set_xmit_pkt_hdr(wvp);
1569
1570 /*
1571 * xmit pkt starts with an output more immediate,
1572 * a double sized hci1394_desc
1573 */
1574 wv_omi_descp = (hci1394_output_more_imm_t *)
1575 (&wvp->descriptor_block[wvp->descriptors]);
1576 HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1577
1578 wv_omi_descp->data_addr = 0;
1579 wv_omi_descp->branch = 0;
1580 wv_omi_descp->status = 0;
1581 wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1582 wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1583 wv_omi_descp->q3 = 0;
1584 wv_omi_descp->q4 = 0;
1585
1586 wvp->descriptors += 2;
1587
1588 /*
1589 * create the required output more hci1394_desc descriptor, then create
1590 * an output last hci1394_desc descriptor with xfer status enabled
1591 */
1592 for (ii = 0; ii < wvp->xfer_bufcnt; ii++) {
1593 wv_descp = &wvp->descriptor_block[wvp->descriptors];
1594
1595 if (ii == (wvp->xfer_bufcnt - 1)) {
1596 HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL,
1597 DESC_INTR_DSABL, wvp->xfer_size[ii]);
1598 } else {
1599 HCI1394_INIT_IT_OMORE(wv_descp, wvp->xfer_size[ii]);
1600 }
1601 wv_descp->data_addr = wvp->xfer_bufp[ii];
1602 wv_descp->branch = 0;
1603 wv_descp->status = 0;
1604 wvp->descriptors++;
1605 }
1606
1607 /* allocate and copy descriptor block to dma memory */
1608 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp, &dma_desc_bound) !=
1609 DDI_SUCCESS) {
1610 /* wvp->dma_bld_error is set by above call */
1611 return;
1612 }
1613
1614 /*
1615 * set dma addrs into xfer_ctl structure (unbound addr (kernel virtual)
1616 * is last component (descriptor))
1617 */
1618 xctlp->dma[0].dma_bound = dma_desc_bound;
1619 xctlp->dma[0].dma_descp =
1620 dma_descp + (wvp->xfer_bufcnt + 1) * sizeof (hci1394_desc_t);
1621 xctlp->dma[0].dma_buf = &wvp->dma_currentp->mem;
1622 }
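/*
 * Descriptor-count example for the block built above (illustrative): a
 * transmit packet with two buffer fragments uses 2 descriptor slots for
 * the double-sized output more immediate header, one output more and one
 * output last, so wvp->descriptors == 4; that count also becomes the Z
 * value merged into the bound address by hci1394_bld_dma_mem_desc_blk().
 */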
1623
1624 /*
1625 * hci1394_bld_xmit_buf_desc()
1626 * Used to create the openHCI dma descriptor blocks for a transmit buffer.
1627 */
1628 static void
1629 hci1394_bld_xmit_buf_desc(hci1394_comp_ixl_vars_t *wvp)
1630 {
1631 hci1394_xfer_ctl_t *xctlp;
1632 ixl1394_xfer_buf_t *local_ixl_cur_xfer_stp;
1633 hci1394_output_more_imm_t *wv_omi_descp; /* shorthand to local descrp */
1634 hci1394_desc_t *wv_descp; /* shorthand to local descriptor */
1635 caddr_t dma_descp;
1636 uint32_t dma_desc_bound;
1637 uint32_t pktsize;
1638 uint32_t pktcnt;
1639 uint32_t ii;
1640
1641 local_ixl_cur_xfer_stp = (ixl1394_xfer_buf_t *)wvp->ixl_cur_xfer_stp;
1642
1643 /* determine number and size of pkt desc blocks to create */
1644 pktsize = local_ixl_cur_xfer_stp->pkt_size;
1645 pktcnt = local_ixl_cur_xfer_stp->size / pktsize;
1646
1647 /* allocate an xfer_ctl struct including pktcnt xfer_ctl_dma structs */
1648 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, pktcnt)) == NULL) {
1649 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1650 return;
1651 }
1652
1653 /*
1654 * save xfer_ctl struct addr in compiler_privatep of
1655 * current IXL xfer cmd
1656 */
1657 local_ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1658
1659 /* generate values for the xmit pkt hdrs */
1660 wvp->xfer_pktlen = pktsize;
1661 hci1394_set_xmit_pkt_hdr(wvp);
1662
1663 /*
1664 * xmit pkt starts with an output more immediate,
1665 * a double sized hci1394_desc
1666 */
1667 wv_omi_descp = (hci1394_output_more_imm_t *)
1668 &wvp->descriptor_block[wvp->descriptors];
1669
1670 HCI1394_INIT_IT_OMORE_IMM(wv_omi_descp);
1671
1672 wv_omi_descp->data_addr = 0;
1673 wv_omi_descp->branch = 0;
1674 wv_omi_descp->status = 0;
1675 wv_omi_descp->q1 = wvp->xmit_pkthdr1;
1676 wv_omi_descp->q2 = wvp->xmit_pkthdr2;
1677 wv_omi_descp->q3 = 0;
1678 wv_omi_descp->q4 = 0;
1679
1680 wvp->descriptors += 2;
1681
1682 /* follow with a single output last descriptor w/status enabled */
1683 wv_descp = &wvp->descriptor_block[wvp->descriptors];
1684 HCI1394_INIT_IT_OLAST(wv_descp, DESC_HDR_STAT_ENBL, DESC_INTR_DSABL,
1685 pktsize);
1686 wv_descp->data_addr = local_ixl_cur_xfer_stp->ixl_buf.ixldmac_addr;
1687 wv_descp->branch = 0;
1688 wv_descp->status = 0;
1689 wvp->descriptors++;
1690
1691 /*
1692 * generate as many contiguous descriptor blocks as there are
1693 * xmit packets
1694 */
1695 for (ii = 0; ii < pktcnt; ii++) {
1696
1697 /* if about to create last descriptor block */
1698 if (ii == (pktcnt - 1)) {
1699 /* check and perform any required hci cache flush */
1700 if (hci1394_flush_end_desc_check(wvp, ii) !=
1701 DDI_SUCCESS) {
1702 /* wvp->dma_bld_error is set by above call */
1703 return;
1704 }
1705 }
1706
1707 /* allocate and copy descriptor block to dma memory */
1708 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1709 &dma_desc_bound) != DDI_SUCCESS) {
1710 /* wvp->dma_bld_error is set by above call */
1711 return;
1712 }
1713
1714 /*
1715 * set dma addrs into xfer_ctl structure (unbound addr
1716 * (kernel virtual) is last component (descriptor))
1717 */
1718 xctlp->dma[ii].dma_bound = dma_desc_bound;
1719 xctlp->dma[ii].dma_descp = dma_descp + 2 *
1720 sizeof (hci1394_desc_t);
1721 xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
1722
1723 /* advance buffer ptr by pktsize in descriptor block */
1724 wvp->descriptor_block[wvp->descriptors - 1].data_addr +=
1725 pktsize;
1726 }
1727 }
1728
1729 /*
1730 * hci1394_bld_xmit_hdronly_nopkt_desc()
1731 * Used to create the openHCI dma descriptor blocks for transmitting
1732 * a packet consisting of an isochronous header with no data payload,
1733 * or for not sending a packet at all for a cycle.
1734 *
1735 * A Store_Value openhci descriptor is built at the start of each
1736 * IXL1394_OP_SEND_HDR_ONLY and IXL1394_OP_SEND_NO_PKT command's dma
1737 * descriptor block (to allow for skip cycle specification and set skipmode
1738 * processing for these commands).
1739 */
1740 static void
1741 hci1394_bld_xmit_hdronly_nopkt_desc(hci1394_comp_ixl_vars_t *wvp)
1742 {
1743 hci1394_xfer_ctl_t *xctlp;
1744 hci1394_output_last_t *wv_ol_descp; /* shorthand to local descrp */
1745 hci1394_output_last_imm_t *wv_oli_descp; /* shorthand to local descrp */
1746 caddr_t dma_descp;
1747 uint32_t dma_desc_bound;
1748 uint32_t repcnt;
1749 uint32_t ii;
1750
1751 /* determine # of instances of output hdronly/nopkt to generate */
1752 repcnt = ((ixl1394_xmit_special_t *)wvp->ixl_cur_xfer_stp)->count;
1753
1754 /*
1755 * allocate an xfer_ctl structure which includes repcnt
1756 * xfer_ctl_dma structs
1757 */
1758 if ((xctlp = hci1394_alloc_xfer_ctl(wvp, repcnt)) == NULL) {
1759
1760 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1761
1762 return;
1763 }
1764
1765 /*
1766 * save xfer_ctl struct addr in compiler_privatep of
1767 * current IXL xfer command
1768 */
1769 wvp->ixl_cur_xfer_stp->compiler_privatep = (void *)xctlp;
1770
1771 /*
1772 * create a storevalue descriptor
1773 * (will be used for skip vs jump processing)
1774 */
1775 hci1394_set_xmit_storevalue_desc(wvp);
1776
1777 /*
1778 * processing now based on opcode:
1779 * IXL1394_OP_SEND_HDR_ONLY or IXL1394_OP_SEND_NO_PKT
1780 */
1781 if ((wvp->ixl_cur_xfer_stp->ixl_opcode & ~IXL1394_OPF_UPDATE) ==
1782 IXL1394_OP_SEND_HDR_ONLY) {
1783
1784 /* for header only, generate values for the xmit pkt hdrs */
1785 hci1394_set_xmit_pkt_hdr(wvp);
1786
1787 /*
1788 * create an output last immediate (double sized) descriptor
1789 * xfer status enabled
1790 */
1791 wv_oli_descp = (hci1394_output_last_imm_t *)
1792 &wvp->descriptor_block[wvp->descriptors];
1793
1794 HCI1394_INIT_IT_OLAST_IMM(wv_oli_descp, DESC_HDR_STAT_ENBL,
1795 DESC_INTR_DSABL);
1796
1797 wv_oli_descp->data_addr = 0;
1798 wv_oli_descp->branch = 0;
1799 wv_oli_descp->status = 0;
1800 wv_oli_descp->q1 = wvp->xmit_pkthdr1;
1801 wv_oli_descp->q2 = wvp->xmit_pkthdr2;
1802 wv_oli_descp->q3 = 0;
1803 wv_oli_descp->q4 = 0;
1804 wvp->descriptors += 2;
1805 } else {
1806 /*
1807 * for skip cycle, create a single output last descriptor
1808 * with xfer status enabled
1809 */
1810 wv_ol_descp = &wvp->descriptor_block[wvp->descriptors];
1811 HCI1394_INIT_IT_OLAST(wv_ol_descp, DESC_HDR_STAT_ENBL,
1812 DESC_INTR_DSABL, 0);
1813 wv_ol_descp->data_addr = 0;
1814 wv_ol_descp->branch = 0;
1815 wv_ol_descp->status = 0;
1816 wvp->descriptors++;
1817 }
1818
1819 /*
1820 * generate as many contiguous descriptor blocks as repeat count
1821 * indicates
1822 */
1823 for (ii = 0; ii < repcnt; ii++) {
1824
1825 /* if about to create last descriptor block */
1826 if (ii == (repcnt - 1)) {
1827 /* check and perform any required hci cache flush */
1828 if (hci1394_flush_end_desc_check(wvp, ii) !=
1829 DDI_SUCCESS) {
1830 /* wvp->dma_bld_error is set by above call */
1831 return;
1832 }
1833 }
1834
1835 /* allocate and copy descriptor block to dma memory */
1836 if (hci1394_bld_dma_mem_desc_blk(wvp, &dma_descp,
1837 &dma_desc_bound) != DDI_SUCCESS) {
1838 /* wvp->dma_bld_error is set by above call */
1839 return;
1840 }
1841
1842 /*
1843 * set dma addrs into xfer_ctl structure (unbound addr
1844 		 * (kernel virtual) is last component (descriptor))
1845 */
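		/*
		 * (the sizeof (hci1394_desc_t) offset skips the STORE_VALUE
		 * descriptor at the head of the block, so dma_descp points at
		 * the output descriptor within the copied block)
		 */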
1846 xctlp->dma[ii].dma_bound = dma_desc_bound;
1847 xctlp->dma[ii].dma_descp = dma_descp + sizeof (hci1394_desc_t);
1848 xctlp->dma[ii].dma_buf = &wvp->dma_currentp->mem;
1849 }
1850 }
1851
1852 /*
1853 * hci1394_bld_dma_mem_desc_blk()
1854 * Used to put a given OpenHCI descriptor block into dma bound memory.
1855 */
1856 static int
1857 hci1394_bld_dma_mem_desc_blk(hci1394_comp_ixl_vars_t *wvp, caddr_t *dma_descpp,
1858 uint32_t *dma_desc_bound)
1859 {
1860 uint32_t dma_bound;
1861
1862 /* set internal error if no descriptor blocks to build */
1863 if (wvp->descriptors == 0) {
1864 wvp->dma_bld_error = IXL1394_EINTERNAL_ERROR;
1865 return (DDI_FAILURE);
1866 }
1867
1868 /* allocate dma memory and move this descriptor block to it */
1869 *dma_descpp = (caddr_t)hci1394_alloc_dma_mem(wvp, wvp->descriptors *
1870 sizeof (hci1394_desc_t), &dma_bound);
1871
1872 if (*dma_descpp == NULL) {
1873 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
1874 return (DDI_FAILURE);
1875 }
1876 #ifdef _KERNEL
1877 ddi_rep_put32(wvp->dma_currentp->mem.bi_handle,
1878 (uint_t *)wvp->descriptor_block, (uint_t *)*dma_descpp,
1879 wvp->descriptors * (sizeof (hci1394_desc_t) >> 2),
1880 DDI_DEV_AUTOINCR);
1881 #else
1882 bcopy(wvp->descriptor_block, *dma_descpp,
1883 wvp->descriptors * sizeof (hci1394_desc_t));
1884 #endif
1885 /*
1886 	 * convert allocated block's memory address to bus address space and
1887 * include properly set Z bits (descriptor count).
1888 */
1889 *dma_desc_bound = (dma_bound & ~DESC_Z_MASK) | wvp->descriptors;
1890
1891 return (DDI_SUCCESS);
1892 }
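
/*
 * For illustration, assuming DESC_Z_MASK covers the low-order four bits of
 * the (16-byte aligned) bound address, a block of 3 descriptor components
 * bound at bus address 0x00ABC0 would be recorded as
 *
 *	dma_desc_bound = (0x00ABC0 & ~DESC_Z_MASK) | 3 = 0x00ABC3
 *
 * i.e. the Z field in the low bits carries the component count the OpenHCI
 * controller uses when fetching the block.
 */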
1893
1894 /*
1895 * hci1394_set_xmit_pkt_hdr()
1896 * Compose the 2 quadlets for the xmit packet header.
1897 */
1898 static void
1899 hci1394_set_xmit_pkt_hdr(hci1394_comp_ixl_vars_t *wvp)
1900 {
1901 uint16_t tag;
1902 uint16_t sync;
1903
1904 /*
1905 * choose tag and sync bits for header either from default values or
1906 * from currently active set tag and sync IXL command
1907 * (clear command after use)
1908 */
1909 if (wvp->ixl_settagsync_cmdp == NULL) {
1910 tag = wvp->default_tag;
1911 sync = wvp->default_sync;
1912 } else {
1913 tag = wvp->ixl_settagsync_cmdp->tag;
1914 sync = wvp->ixl_settagsync_cmdp->sync;
1915 wvp->ixl_settagsync_cmdp = NULL;
1916 }
1917 tag &= (DESC_PKT_TAG_MASK >> DESC_PKT_TAG_SHIFT);
1918 sync &= (DESC_PKT_SY_MASK >> DESC_PKT_SY_SHIFT);
1919
1920 /*
1921 * build xmit pkt header -
1922 * hdr1 has speed, tag, channel number and sync bits
1923 * hdr2 has the packet length.
1924 */
1925 wvp->xmit_pkthdr1 = (wvp->ctxtp->isospd << DESC_PKT_SPD_SHIFT) |
1926 (tag << DESC_PKT_TAG_SHIFT) | (wvp->ctxtp->isochan <<
1927 DESC_PKT_CHAN_SHIFT) | (IEEE1394_TCODE_ISOCH <<
1928 DESC_PKT_TCODE_SHIFT) | (sync << DESC_PKT_SY_SHIFT);
1929
1930 wvp->xmit_pkthdr2 = wvp->xfer_pktlen << DESC_PKT_DATALEN_SHIFT;
1931 }
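
/*
 * For illustration, assuming the DESC_PKT_* shifts follow the OpenHCI 1.1
 * IT packet format (sy at bit 0, tcode at bit 4, channel at bit 8, tag at
 * bit 14, spd at bit 16, and the data length in bits 31:16 of the second
 * quadlet), a packet sent at S200 (spd 1) on channel 5 with tag 1, sync 0
 * and a 64-byte payload would be described by
 *
 *	xmit_pkthdr1 = (1 << 16) | (1 << 14) | (5 << 8) | (0xA << 4) | 0
 *	xmit_pkthdr2 = 64 << 16
 *
 * where 0xA is the isochronous data block tcode.
 */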
1932
1933 /*
1934 * hci1394_set_xmit_skip_mode()
1935  *    Set the current skip mode from the default or from the currently
1936  *    active set skipmode command. If that command's skip mode is skip to
1937  *    label, find the xfer start IXL command which follows the skip label
1938  *    and store it into the set skipmode IXL command's compiler_privatep.
1939 */
1940 static void
1941 hci1394_set_xmit_skip_mode(hci1394_comp_ixl_vars_t *wvp)
1942 {
1943 int err;
1944
1945 if (wvp->ixl_setskipmode_cmdp == NULL) {
1946 wvp->skipmode = wvp->default_skipmode;
1947 wvp->skiplabelp = wvp->default_skiplabelp;
1948 wvp->skipxferp = wvp->default_skipxferp;
1949 } else {
1950 wvp->skipmode = wvp->ixl_setskipmode_cmdp->skipmode;
1951 wvp->skiplabelp = wvp->ixl_setskipmode_cmdp->label;
1952 wvp->skipxferp = NULL;
1953 if (wvp->skipmode == IXL1394_SKIP_TO_LABEL) {
1954 err = hci1394_ixl_find_next_exec_xfer(wvp->skiplabelp,
1955 NULL, &wvp->skipxferp);
1956 if (err == DDI_FAILURE) {
1957 wvp->skipxferp = NULL;
1958 wvp->dma_bld_error = IXL1394_ENO_DATA_PKTS;
1959 }
1960 }
1961 wvp->ixl_setskipmode_cmdp->compiler_privatep =
1962 (void *)wvp->skipxferp;
1963 }
1964 }
1965
1966 /*
1967 * hci1394_set_xmit_storevalue_desc()
1968 * Set up store_value DMA descriptor.
1969 * XMIT_HDRONLY or XMIT_NOPKT xfer states use a store value as first
1970 * descriptor in the descriptor block (to handle skip mode processing)
1971 */
1972 static void
1973 hci1394_set_xmit_storevalue_desc(hci1394_comp_ixl_vars_t *wvp)
1974 {
1975 wvp->descriptors++;
1976
1977 HCI1394_INIT_IT_STORE(&wvp->descriptor_block[wvp->descriptors - 1],
1978 wvp->storevalue_data);
1979 wvp->descriptor_block[wvp->descriptors - 1].data_addr =
1980 wvp->storevalue_bufp;
1981 wvp->descriptor_block[wvp->descriptors - 1].branch = 0;
1982 wvp->descriptor_block[wvp->descriptors - 1].status = 0;
1983 }
1984
1985 /*
1986 * hci1394_set_next_xfer_buf()
1987 * This routine adds the data buffer to the current wvp list.
1988 * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
1989 * contains the error code.
1990 */
1991 static int
1992 hci1394_set_next_xfer_buf(hci1394_comp_ixl_vars_t *wvp, uint32_t bufp,
1993 uint16_t size)
1994 {
1995 /* error if buffer pointer is null (size may be 0) */
1996 if (bufp == 0) {
1997 wvp->dma_bld_error = IXL1394_ENULL_BUFFER_ADDR;
1998 return (DDI_FAILURE);
1999 }
2000
2001 /* count new xfer buffer */
2002 wvp->xfer_bufcnt++;
2003
2004 /* error if exceeds maximum xfer buffer components allowed */
2005 if (wvp->xfer_bufcnt > HCI1394_DESC_MAX_Z) {
2006 wvp->dma_bld_error = IXL1394_EFRAGMENT_OFLO;
2007 return (DDI_FAILURE);
2008 }
2009
2010 /* save xmit buffer and size */
2011 wvp->xfer_bufp[wvp->xfer_bufcnt - 1] = bufp;
2012 wvp->xfer_size[wvp->xfer_bufcnt - 1] = size;
2013
2014 /* accumulate total packet length */
2015 wvp->xfer_pktlen += size;
2016
2017 return (DDI_SUCCESS);
2018 }
2019
2020 /*
2021 * hci1394_flush_end_desc_check()
2022  *    Check if a flush is required before the last descriptor block of a
2023  *    non-unary set generated by an xfer buff or xmit special command, or
2024  *    of a unary set, provided no other flush has already been done.
2025 *
2026 * hci flush is required if xfer is finalized by an updateable
2027 * jump command.
2028 *
2029 * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2030 * will contain the error code.
2031 */
2032 static int
2033 hci1394_flush_end_desc_check(hci1394_comp_ixl_vars_t *wvp, uint32_t count)
2034 {
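	/*
	 * a flush is considered here when the set is non-unary (count != 0),
	 * or when it is unary but none of the start-of-set flush conditions
	 * (updateable xfer, updateable set, initiating label) has already
	 * produced a flush; the flush itself is needed only when an
	 * updateable jump finalized this xfer
	 */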
2035 if ((count != 0) ||
2036 ((wvp->xfer_hci_flush & (UPDATEABLE_XFER | UPDATEABLE_SET |
2037 INITIATING_LBL)) == 0)) {
2038
2039 if (wvp->xfer_hci_flush & UPDATEABLE_JUMP) {
2040 if (hci1394_flush_hci_cache(wvp) != DDI_SUCCESS) {
2041 /* wvp->dma_bld_error is set by above call */
2042 return (DDI_FAILURE);
2043 }
2044 }
2045 }
2046
2047 return (DDI_SUCCESS);
2048 }
2049
2050 /*
2051 * hci1394_flush_hci_cache()
2052  * Sun hci controller (RIO) implementation-specific processing!
2053 *
2054 * Allocate dma memory for 1 hci descriptor block which will be left unused.
2055 * During execution this will cause a break in the contiguous address space
2056 * processing required by Sun's RIO implementation of the ohci controller and
2057 * will require the controller to refetch the next descriptor block from
2058 * host memory.
2059 *
2060  * General rules for cache flush preceding a descriptor block in dma memory:
2061 * 1. Current IXL Xfer Command Updateable Rule:
2062 * Cache flush of IXL xfer command is required if it, or any of the
2063 * non-start IXL packet xfer commands associated with it, is flagged
2064 * updateable.
2065 * 2. Next IXL Xfer Command Indeterminate Rule:
2066 * Cache flush of IXL xfer command is required if an IXL jump command
2067 * which is flagged updateable has finalized the current IXL xfer
2068 * command.
2069 * 3. Updateable IXL Set Command Rule:
2070 * Cache flush of an IXL xfer command is required if any of the IXL
2071 * "Set" commands (IXL1394_OP_SET_*) associated with the IXL xfer
2072  *	command (i.e. immediately preceding it), is flagged updateable.
2073 * 4. Label Initiating Xfer Command Rule:
2074 * Cache flush of IXL xfer command is required if it is initiated by a
2075 * label IXL command. (This is to allow both a flush of the cache and
2076 * an interrupt to be generated easily and in close proximity to each
2077  *	other. This makes a simpler, more reliable reset of descriptor
2078  *	statuses possible, especially under circumstances where the cycle
2079  *	of hci commands is short and/or there are no callbacks distributed
2080  *	through the span of xfers, etc. This is especially important for
2081  *	input, where statuses must be reset before execution cycles back
2082  *	again.)
2083 *
2084 * Application of above rules:
2085 * Packet mode IXL xfer commands:
2086  *	If any of the above flush rules apply, a cache flush should be done
2087  *	immediately preceding the generation of the dma descriptor block
2088 * for the packet xfer.
2089 * Non-packet mode IXL xfer commands (including IXL1394_OP_*BUF*,
2090 * SEND_HDR_ONLY, and SEND_NO_PKT):
2091  *	If Rule #1, #3, or #4 applies, a cache flush should be done
2092 * immediately before the first generated dma descriptor block of the
2093 * non-packet xfer.
2094  *	If Rule #2 applies, a cache flush should be done immediately before
2095 * the last generated dma descriptor block of the non-packet xfer.
2096 *
2097  * Note: A cache flush should be done at most once at each location that
2098  *	requires flushing, no matter how many rules apply (i.e. only once
2099 * before the first descriptor block and/or only once before the last
2100 * descriptor block generated). If more than one place requires a flush,
2101 * then both flush operations must be performed. This is determined by
2102 * taking all rules that apply into account.
2103 *
2104 * Returns DDI_SUCCESS or DDI_FAILURE. If DDI_FAILURE, wvp->dma_bld_error
2105 * will contain the error code.
2106 */
2107 static int
2108 hci1394_flush_hci_cache(hci1394_comp_ixl_vars_t *wvp)
2109 {
2110 uint32_t dma_bound;
2111
2112 if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t), &dma_bound) ==
2113 NULL) {
2114 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2115 return (DDI_FAILURE);
2116 }
2117
2118 return (DDI_SUCCESS);
2119 }
2120
2121 /*
2122 * hci1394_alloc_storevalue_dma_mem()
2123  *    Allocate dma memory for a descriptor block of one hci component,
2124  *    which will be used as the dma memory location that ixl compiler
2125  *    generated storevalue descriptor commands specify as the location
2126  *    in which to store their data value.
2127 *
2128  * Returns 32-bit bound address of allocated mem, or 0 if allocation fails.
2129 */
2130 static uint32_t
2131 hci1394_alloc_storevalue_dma_mem(hci1394_comp_ixl_vars_t *wvp)
2132 {
2133 uint32_t dma_bound;
2134
2135 if (hci1394_alloc_dma_mem(wvp, sizeof (hci1394_desc_t),
2136 &dma_bound) == NULL) {
2137 wvp->dma_bld_error = IXL1394_EMEM_ALLOC_FAIL;
2138 return (0);
2139 }
2140
2141 /* return bound address of allocated memory */
2142 return (dma_bound);
2143 }
2144
2145
2146 /*
2147 * hci1394_alloc_xfer_ctl()
2148 * Allocate an xfer_ctl structure.
2149 */
2150 static hci1394_xfer_ctl_t *
2151 hci1394_alloc_xfer_ctl(hci1394_comp_ixl_vars_t *wvp, uint32_t dmacnt)
2152 {
2153 hci1394_xfer_ctl_t *xcsp;
2154
2155 /*
2156 	 * allocate an xfer_ctl struct which includes dmacnt
2157 * xfer_ctl_dma structs
2158 */
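	/*
	 * (the dmacnt - 1 sizing below assumes hci1394_xfer_ctl_t already
	 * contains the first element of its dma[] array)
	 */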
2159 #ifdef _KERNEL
2160 if ((xcsp = (hci1394_xfer_ctl_t *)kmem_zalloc(
2161 (sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2162 sizeof (hci1394_xfer_ctl_dma_t)), KM_NOSLEEP)) == NULL) {
2163
2164 return (NULL);
2165 }
2166 #else
2167 /*
2168 * This section makes it possible to easily run and test the compiler in
2169 * user mode.
2170 */
2171 if ((xcsp = (hci1394_xfer_ctl_t *)calloc(1,
2172 sizeof (hci1394_xfer_ctl_t) + (dmacnt - 1) *
2173 sizeof (hci1394_xfer_ctl_dma_t))) == NULL) {
2174 return (NULL);
2175 }
2176 #endif
2177 /*
2178 * set dma structure count into allocated xfer_ctl struct for
2179 * later deletion.
2180 */
2181 xcsp->cnt = dmacnt;
2182
2183 /* link it to previously allocated xfer_ctl structs or set as first */
2184 if (wvp->xcs_firstp == NULL) {
2185 wvp->xcs_firstp = wvp->xcs_currentp = xcsp;
2186 } else {
2187 wvp->xcs_currentp->ctl_nextp = xcsp;
2188 wvp->xcs_currentp = xcsp;
2189 }
2190
2191 /* return allocated xfer_ctl structure */
2192 return (xcsp);
2193 }
2194
2195 /*
2196 * hci1394_alloc_dma_mem()
2197 * Allocates and binds memory for openHCI DMA descriptors as needed.
2198 */
2199 static void *
2200 hci1394_alloc_dma_mem(hci1394_comp_ixl_vars_t *wvp, uint32_t size,
2201 uint32_t *dma_bound)
2202 {
2203 hci1394_idma_desc_mem_t *dma_new;
2204 hci1394_buf_parms_t parms;
2205 hci1394_buf_info_t *memp;
2206 void *dma_mem_ret;
2207 int ret;
2208
2209 /*
2210 * if no dma has been allocated or current request exceeds
2211 * remaining memory
2212 */
2213 if ((wvp->dma_currentp == NULL) ||
2214 (size > (wvp->dma_currentp->mem.bi_cookie.dmac_size -
2215 wvp->dma_currentp->used))) {
2216 #ifdef _KERNEL
2217 /* kernel-mode memory allocation for driver */
2218
2219 /* allocate struct to track more dma descriptor memory */
2220 if ((dma_new = (hci1394_idma_desc_mem_t *)
2221 kmem_zalloc(sizeof (hci1394_idma_desc_mem_t),
2222 KM_NOSLEEP)) == NULL) {
2223 return (NULL);
2224 }
2225
2226 /*
2227 		 * if more cookies are available from the current mem, try to find
2228 * one of suitable size. Cookies that are too small will be
2229 * skipped and unused. Given that cookie size is always at least
2230 * 1 page long and HCI1394_DESC_MAX_Z is much smaller than that,
2231 * it's a small price to pay for code simplicity.
2232 */
2233 if (wvp->dma_currentp != NULL) {
2234 /* new struct is derived from current */
2235 memp = &wvp->dma_currentp->mem;
2236 dma_new->mem = *memp;
2237 dma_new->offset = wvp->dma_currentp->offset +
2238 memp->bi_cookie.dmac_size;
2239
2240 for (; memp->bi_cookie_count > 1;
2241 memp->bi_cookie_count--) {
2242 ddi_dma_nextcookie(memp->bi_dma_handle,
2243 &dma_new->mem.bi_cookie);
2244
2245 if (dma_new->mem.bi_cookie.dmac_size >= size) {
2246 dma_new->mem_handle =
2247 wvp->dma_currentp->mem_handle;
2248 wvp->dma_currentp->mem_handle = NULL;
2249 dma_new->mem.bi_cookie_count--;
2250 break;
2251 }
2252 dma_new->offset +=
2253 dma_new->mem.bi_cookie.dmac_size;
2254 }
2255 }
2256
2257 /* if no luck with current buffer, allocate a new one */
2258 if (dma_new->mem_handle == NULL) {
2259 parms.bp_length = HCI1394_IXL_PAGESIZE;
2260 parms.bp_max_cookies = OHCI_MAX_COOKIE;
2261 parms.bp_alignment = 16;
2262 ret = hci1394_buf_alloc(&wvp->soft_statep->drvinfo,
2263 &parms, &dma_new->mem, &dma_new->mem_handle);
2264 if (ret != DDI_SUCCESS) {
2265 kmem_free(dma_new,
2266 sizeof (hci1394_idma_desc_mem_t));
2267
2268 return (NULL);
2269 }
2270
2271 /* paranoia: this is not supposed to happen */
2272 if (dma_new->mem.bi_cookie.dmac_size < size) {
2273 hci1394_buf_free(&dma_new->mem_handle);
2274 kmem_free(dma_new,
2275 sizeof (hci1394_idma_desc_mem_t));
2276
2277 return (NULL);
2278 }
2279 dma_new->offset = 0;
2280 }
2281 #else
2282 /* user-mode memory allocation for user mode compiler tests */
2283 /* allocate another dma_desc_mem struct */
2284 if ((dma_new = (hci1394_idma_desc_mem_t *)
2285 calloc(1, sizeof (hci1394_idma_desc_mem_t))) == NULL) {
2286 return (NULL);
2287 }
2288 dma_new->mem.bi_dma_handle = NULL;
2289 dma_new->mem.bi_handle = NULL;
2290 if ((dma_new->mem.bi_kaddr = (caddr_t)calloc(1,
2291 HCI1394_IXL_PAGESIZE)) == NULL) {
2292 return (NULL);
2293 }
2294 dma_new->mem.bi_cookie.dmac_address =
2295 (unsigned long)dma_new->mem.bi_kaddr;
2296 dma_new->mem.bi_real_length = HCI1394_IXL_PAGESIZE;
2297 dma_new->mem.bi_cookie_count = 1;
2298 #endif
2299
2300 /* if this is not first dma_desc_mem, link last one to it */
2301 if (wvp->dma_currentp != NULL) {
2302 wvp->dma_currentp->dma_nextp = dma_new;
2303 wvp->dma_currentp = dma_new;
2304 } else {
2305 /* else set it as first one */
2306 wvp->dma_currentp = wvp->dma_firstp = dma_new;
2307 }
2308 }
2309
2310 /* now allocate requested memory from current block */
2311 dma_mem_ret = wvp->dma_currentp->mem.bi_kaddr +
2312 wvp->dma_currentp->offset + wvp->dma_currentp->used;
2313 *dma_bound = wvp->dma_currentp->mem.bi_cookie.dmac_address +
2314 wvp->dma_currentp->used;
2315 wvp->dma_currentp->used += size;
2316
2317 return (dma_mem_ret);
2318 }
2319
2320
2321 /*
2322 * hci1394_is_opcode_valid()
2323  *    Given an ixl opcode, this routine returns B_TRUE if it is a
2324  *    recognized opcode and B_FALSE otherwise.
2325  *    Note that the FULL 16 bits of the opcode are checked, including the
2326  *    various flags, and not just the low-order 8 bits of unique code.
2327 */
2328 static boolean_t
2329 hci1394_is_opcode_valid(uint16_t ixlopcode)
2330 {
2331 /* if it's not one we know about, then it's bad */
2332 switch (ixlopcode) {
2333 case IXL1394_OP_LABEL:
2334 case IXL1394_OP_JUMP:
2335 case IXL1394_OP_CALLBACK:
2336 case IXL1394_OP_RECV_PKT:
2337 case IXL1394_OP_RECV_PKT_ST:
2338 case IXL1394_OP_RECV_BUF:
2339 case IXL1394_OP_SEND_PKT:
2340 case IXL1394_OP_SEND_PKT_ST:
2341 case IXL1394_OP_SEND_PKT_WHDR_ST:
2342 case IXL1394_OP_SEND_BUF:
2343 case IXL1394_OP_SEND_HDR_ONLY:
2344 case IXL1394_OP_SEND_NO_PKT:
2345 case IXL1394_OP_STORE_TIMESTAMP:
2346 case IXL1394_OP_SET_TAGSYNC:
2347 case IXL1394_OP_SET_SKIPMODE:
2348 case IXL1394_OP_SET_SYNCWAIT:
2349 case IXL1394_OP_JUMP_U:
2350 case IXL1394_OP_CALLBACK_U:
2351 case IXL1394_OP_RECV_PKT_U:
2352 case IXL1394_OP_RECV_PKT_ST_U:
2353 case IXL1394_OP_RECV_BUF_U:
2354 case IXL1394_OP_SEND_PKT_U:
2355 case IXL1394_OP_SEND_PKT_ST_U:
2356 case IXL1394_OP_SEND_PKT_WHDR_ST_U:
2357 case IXL1394_OP_SEND_BUF_U:
2358 case IXL1394_OP_SET_TAGSYNC_U:
2359 case IXL1394_OP_SET_SKIPMODE_U:
2360 return (B_TRUE);
2361 default:
2362 return (B_FALSE);
2363 }
2364 }
2365