/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 1999-2002 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * hci1394_ixl_misc.c
 *    Isochronous IXL miscellaneous routines.
 *    Contains common routines used by the ixl compiler, interrupt handler and
 *    dynamic update.
 */

#include <sys/kmem.h>
#include <sys/types.h>
#include <sys/conf.h>
#include <sys/1394/h1394.h>
#include <sys/1394/ixl1394.h>
#include <sys/1394/adapters/hci1394.h>


/* local routines */
static void hci1394_delete_dma_desc_mem(hci1394_state_t *soft_statep,
    hci1394_idma_desc_mem_t *);
static void hci1394_delete_xfer_ctl(hci1394_xfer_ctl_t *);

/*
 * hci1394_ixl_set_start()
 *    Set up the context structure with the first ixl command to process
 *    and the first hci descriptor to execute.
 *
 *    This function assumes the current context is stopped!
 *
 *    If ixlstp is not NULL but is neither the first compiled ixl command
 *    nor an ixl label command, an error is returned.
 *    If ixlstp is NULL, the first compiled ixl command (ixl_firstp) is used
 *    in its place.
 *
 *    If no executable xfer command is found along the exec path from
 *    ixlstp, an error is returned.
 */
int
hci1394_ixl_set_start(hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlstp)
{

	ixl1394_command_t *ixl_exec_startp;

	/* if ixl start command is null, use first compiled ixl command */
	if (ixlstp == NULL) {
		ixlstp = ctxtp->ixl_firstp;
	}

	/*
	 * if ixl start command is not first ixl compiled and is not a label,
	 * error
	 */
	if ((ixlstp != ctxtp->ixl_firstp) && (ixlstp->ixl_opcode !=
	    IXL1394_OP_LABEL)) {
		return (-1);
	}

	/* follow exec path to find first ixl command that's an xfer command */
	(void) hci1394_ixl_find_next_exec_xfer(ixlstp, NULL, &ixl_exec_startp);

	/*
	 * if there was one, then the hci1394_xfer_ctl structure in its
	 * compiler private has the appropriate bound address
	 */
	if (ixl_exec_startp != NULL) {

		/* set up for start of context and return done */
		ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
		    ixl_exec_startp->compiler_privatep)->dma[0].dma_bound;

		ctxtp->dma_last_time = 0;
		ctxtp->ixl_exec_depth = 0;
		ctxtp->ixl_execp = ixlstp;
		ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;

		return (0);
	}

	/* else no executable xfer command found, return error */
	return (1);
}
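
/*
 * Illustrative sketch (not part of the driver source): a stopped context
 * can be rewound to a specific IXL1394_OP_LABEL command rather than to the
 * first compiled command.  The "labelp" variable and DDI_FAILURE return are
 * assumptions for illustration only; hci1394_ixl_set_start() fails if the
 * command is neither the first compiled command nor a label, or if no
 * executable xfer command is reachable from it.
 *
 *	if (hci1394_ixl_set_start(ctxtp, labelp) != 0) {
 *		return (DDI_FAILURE);
 *	}
 */
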
#ifdef _KERNEL
/*
 * hci1394_ixl_reset_status()
 *    Reset all statuses in all hci descriptor blocks associated with the
 *    current linked list of compiled ixl commands.
 *
 *    This function assumes the current context is stopped!
 */
void
hci1394_ixl_reset_status(hci1394_iso_ctxt_t *ctxtp)
{
	ixl1394_command_t *ixlcur;
	ixl1394_command_t *ixlnext;
	hci1394_xfer_ctl_t *xferctlp;
	uint_t ixldepth;
	uint16_t timestamp;

	ixlnext = ctxtp->ixl_firstp;

	/*
	 * Scan for next ixl xfer start command along ixl link path.
	 * Once xfer command found, clear its hci descriptor block's
	 * status. If it is a composite ixl xfer command, clear statuses
	 * in each of its hci descriptor blocks.
	 */
	while (ixlnext != NULL) {

		/* set current and next ixl command */
		ixlcur = ixlnext;
		ixlnext = ixlcur->next_ixlp;

		/* skip to examine next if this is not xfer start ixl command */
		if (((ixlcur->ixl_opcode & IXL1394_OPF_ISXFER) == 0) ||
		    ((ixlcur->ixl_opcode & IXL1394_OPTY_MASK) == 0)) {
			continue;
		}

		/* get control struct for this xfer start ixl command */
		xferctlp = (hci1394_xfer_ctl_t *)ixlcur->compiler_privatep;

		/* clear status in each hci descriptor block for this ixl cmd */
		ixldepth = 0;
		while (ixldepth < xferctlp->cnt) {
			(void) hci1394_ixl_check_status(
			    &xferctlp->dma[ixldepth], ixlcur->ixl_opcode,
			    &timestamp, B_TRUE);
			ixldepth++;
		}
	}
}
#endif
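
/*
 * Illustrative sketch (not part of the driver source): before restarting a
 * stopped context from the top of its IXL program, a caller might first
 * clear any stale descriptor statuses and then rewind the context.  The
 * ordering and error handling shown are assumptions for illustration only.
 *
 *	hci1394_ixl_reset_status(ctxtp);
 *	if (hci1394_ixl_set_start(ctxtp, NULL) != 0) {
 *		return (DDI_FAILURE);
 *	}
 */
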
/*
 * hci1394_ixl_find_next_exec_xfer()
 *    Follows the execution path of the ixl linked list until it finds the
 *    next xfer start IXL command (which may be the current IXL command
 *    itself) or reaches the end of the IXL linked list.  Counts callback
 *    commands found along the way.  (Previously, store timestamp commands
 *    were counted as well.)
 *
 *    To detect an infinite loop of label<->jump without an intervening xfer,
 *    a tolerance level of HCI1394_IXL_MAX_SEQ_JUMPS is used.  Once this
 *    number of jumps is traversed, the IXL prog is assumed to have a loop.
 *
 *    Returns DDI_SUCCESS or DDI_FAILURE.  DDI_FAILURE indicates an infinite
 *    loop of labels & jumps was detected without any intervening xfers.
 *    DDI_SUCCESS indicates that next_exec_ixlpp contains the next xfer ixlp
 *    address, or NULL if the end of the list was reached.  Note that
 *    DDI_FAILURE can only be returned during the IXL compilation phase, and
 *    not during ixl_update processing.
 */
int
hci1394_ixl_find_next_exec_xfer(ixl1394_command_t *ixl_start,
    uint_t *callback_cnt, ixl1394_command_t **next_exec_ixlpp)
{
	uint16_t ixlopcode;
	boolean_t xferfound;
	ixl1394_command_t *ixlp;
	int ii;

	ixlp = ixl_start;
	xferfound = B_FALSE;
	ii = HCI1394_IXL_MAX_SEQ_JUMPS;
	if (callback_cnt != NULL) {
		*callback_cnt = 0;
	}

	/* continue until xfer start ixl cmd or end of ixl list found */
	while ((xferfound == B_FALSE) && (ixlp != NULL) && (ii > 0)) {

		/* get current ixl cmd opcode without update flag */
		ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;

		/* if found an xfer start ixl command, are done */
		if (((ixlopcode & IXL1394_OPF_ISXFER) != 0) &&
		    ((ixlopcode & IXL1394_OPTY_MASK) != 0)) {
			xferfound = B_TRUE;
			continue;
		}

		/* if found jump command, adjust to follow its path */
		if (ixlopcode == IXL1394_OP_JUMP) {
			ixlp = (ixl1394_command_t *)
			    ((ixl1394_jump_t *)ixlp)->label;
			ii--;

			/* if exceeded tolerance, give up */
			if (ii == 0) {
				return (DDI_FAILURE);
			}
			continue;
		}

		/* if current ixl command is a callback, count it */
		if ((ixlopcode == IXL1394_OP_CALLBACK) &&
		    (callback_cnt != NULL)) {
			(*callback_cnt)++;
		}

		/* advance to next linked ixl command */
		ixlp = ixlp->next_ixlp;
	}

	/* return ixl xfer start command found, if any */
	*next_exec_ixlpp = ixlp;

	return (DDI_SUCCESS);
}
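
/*
 * Illustrative sketch (not part of the driver source): during IXL
 * compilation, a caller might locate the next executable xfer command and
 * count the callback commands that precede it, treating DDI_FAILURE as an
 * infinite label<->jump loop in the program.  The variable names below are
 * assumptions for illustration only; a NULL next_xferp on success means the
 * end of the IXL program was reached.
 *
 *	uint_t cb_cnt;
 *	ixl1394_command_t *next_xferp;
 *
 *	if (hci1394_ixl_find_next_exec_xfer(ixlcurp, &cb_cnt, &next_xferp) !=
 *	    DDI_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 */
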
#ifdef _KERNEL
/*
 * hci1394_ixl_check_status()
 *    Read the descriptor status and hdrs, clear as appropriate.
 */
int32_t
hci1394_ixl_check_status(hci1394_xfer_ctl_dma_t *dma, uint16_t ixlopcode,
    uint16_t *timestamp, boolean_t do_status_reset)
{
	uint16_t bufsiz;
	uint16_t hcicnt;
	uint16_t hcirecvcnt;
	hci1394_desc_t *hcidescp;
	off_t hcidesc_off;
	ddi_acc_handle_t acc_hdl;
	ddi_dma_handle_t dma_hdl;
	uint32_t desc_status;
	uint32_t desc_hdr;

	/* last dma descriptor in descriptor block from dma structure */
	hcidescp = (hci1394_desc_t *)(dma->dma_descp);
	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
	acc_hdl = dma->dma_buf->bi_handle;
	dma_hdl = dma->dma_buf->bi_dma_handle;

	/* if current ixl command opcode is xmit */
	if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {

		/* Sync the descriptor before we get the status */
		(void) ddi_dma_sync(dma_hdl, hcidesc_off,
		    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
		desc_status = ddi_get32(acc_hdl, &hcidescp->status);

		/* check if status is set in last dma descriptor in block */
		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
			/*
			 * dma descriptor status set - I/O done.
			 * if not to reset status, just return; else extract
			 * timestamp, reset desc status and return dma
			 * descriptor block status set
			 */
			if (do_status_reset == B_FALSE) {
				return (1);
			}
			*timestamp = (uint16_t)
			    ((desc_status & DESC_ST_TIMESTAMP_MASK) >>
			    DESC_ST_TIMESTAMP_SHIFT);
			ddi_put32(acc_hdl, &hcidescp->status, 0);

			/* Sync descriptor for device (status was cleared) */
			(void) ddi_dma_sync(dma_hdl, hcidesc_off,
			    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORDEV);

			return (1);
		}
		/* else, return dma descriptor block status not set */
		return (0);
	}

	/* else current ixl opcode is recv */
	hcirecvcnt = 0;

	/* get count of descriptors in current dma descriptor block */
	hcicnt = dma->dma_bound & DESC_Z_MASK;
	hcidescp -= (hcicnt - 1);
	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;

	/* iterate fwd through hci descriptors until end or find status set */
	while (hcicnt-- != 0) {

		/* Sync the descriptor before we get the status */
		(void) ddi_dma_sync(dma_hdl, hcidesc_off,
		    hcicnt * sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);

		desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);

		/* get cur buffer size & accumulate potential buffer usage */
		bufsiz = (desc_hdr & DESC_HDR_REQCOUNT_MASK) >>
		    DESC_HDR_REQCOUNT_SHIFT;
		hcirecvcnt += bufsiz;

		desc_status = ddi_get32(acc_hdl, &hcidescp->status);

		/* check if status set on this descriptor block descriptor */
		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
			/*
			 * dma descriptor status set - I/O done.
			 * if not to reset status, just return; else extract
			 * buffer space used, reset desc status and return dma
			 * descriptor block status set
			 */
			if (do_status_reset == B_FALSE) {
				return (1);
			}

			hcirecvcnt -= (desc_status & DESC_ST_RESCOUNT_MASK) >>
			    DESC_ST_RESCOUNT_SHIFT;
			*timestamp = hcirecvcnt;
			desc_status = (bufsiz << DESC_ST_RESCOUNT_SHIFT) &
			    DESC_ST_RESCOUNT_MASK;
			ddi_put32(acc_hdl, &hcidescp->status, desc_status);

			/* Sync descriptor for device (status was cleared) */
			(void) ddi_dma_sync(dma_hdl, hcidesc_off,
			    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORDEV);

			return (1);
		} else {
			/* else, set to evaluate next descriptor. */
			hcidescp++;
			hcidesc_off = (off_t)hcidescp -
			    (off_t)dma->dma_buf->bi_kaddr;
		}
	}

	/* return input not complete status */
	return (0);
}
#endif
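
/*
 * Illustrative sketch (not part of the driver source): interrupt or polling
 * code might peek at the first descriptor block of a compiled xfer command
 * without clearing its status by passing B_FALSE for do_status_reset.  The
 * variable names below are assumptions for illustration only; a nonzero
 * return means the descriptor block has completed, and the timestamp output
 * is only filled in when do_status_reset is B_TRUE.
 *
 *	hci1394_xfer_ctl_t *xferctlp;
 *	uint16_t ts;
 *	int32_t done;
 *
 *	xferctlp = (hci1394_xfer_ctl_t *)ixlp->compiler_privatep;
 *	done = hci1394_ixl_check_status(&xferctlp->dma[0], ixlp->ixl_opcode,
 *	    &ts, B_FALSE);
 */
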
/*
 * hci1394_ixl_cleanup()
 *    Delete all memory previously allocated for a context's IXL prog.
 */
void
hci1394_ixl_cleanup(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
{
	hci1394_delete_xfer_ctl((hci1394_xfer_ctl_t *)ctxtp->xcs_firstp);
	hci1394_delete_dma_desc_mem(soft_statep, ctxtp->dma_firstp);
}
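
/*
 * Illustrative sketch (not part of the driver source): when a context is
 * torn down (after it has been stopped), the memory for its compiled IXL
 * program might be released with a single call.  The calling context shown
 * is an assumption for illustration only.
 *
 *	hci1394_ixl_cleanup(soft_statep, ctxtp);
 */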

/*
 * hci1394_delete_dma_desc_mem()
 *    Iterate through linked list of dma memory descriptors, deleting
 *    allocated dma memory blocks, then deleting the dma memory
 *    descriptor after advancing to next one
 */
/* ARGSUSED */
static void
hci1394_delete_dma_desc_mem(hci1394_state_t *soft_statep,
    hci1394_idma_desc_mem_t *dma_firstp)
{
	hci1394_idma_desc_mem_t *dma_next;

	while (dma_firstp != NULL) {
		dma_next = dma_firstp->dma_nextp;
#ifdef _KERNEL
		/*
		 * if this dma descriptor memory block has the handles, then
		 * free the memory. (Note that valid handles are kept only with
		 * the most recently acquired cookie, and that each cookie is
		 * in its own idma_desc_mem_t struct.)
		 */
		if (dma_firstp->mem_handle != NULL) {
			hci1394_buf_free(&dma_firstp->mem_handle);
		}

		/* free current dma memory descriptor */
		kmem_free(dma_firstp, sizeof (hci1394_idma_desc_mem_t));
#else
		/* user mode free */
		/* free dma memory block and current dma mem descriptor */
		free(dma_firstp->mem.bi_kaddr);
		free(dma_firstp);
#endif
		/* advance to next dma memory descriptor */
		dma_firstp = dma_next;
	}
}

/*
 * hci1394_delete_xfer_ctl()
 *    Iterate through linked list of xfer_ctl structs, deleting allocated
 *    memory.
 */
static void
hci1394_delete_xfer_ctl(hci1394_xfer_ctl_t *xcsp)
{
	hci1394_xfer_ctl_t *delp;

	while ((delp = xcsp) != NULL) {
		/* advance ptr to next xfer_ctl struct */
		xcsp = xcsp->ctl_nextp;

		/*
		 * delete current xfer_ctl struct and included
		 * xfer_ctl_dma structs
		 */
#ifdef _KERNEL
		kmem_free(delp,
		    sizeof (hci1394_xfer_ctl_t) +
		    sizeof (hci1394_xfer_ctl_dma_t) * (delp->cnt - 1));
#else
		free(delp);
#endif
	}
}