/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 1999-2002 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * hci1394_ixl_misc.c
 *    Isochronous IXL miscellaneous routines.
 *    Contains common routines used by the ixl compiler, interrupt handler and
 *    dynamic update.
 */

#include <sys/kmem.h>
#include <sys/types.h>
#include <sys/conf.h>

#include <sys/tnf_probe.h>

#include <sys/1394/h1394.h>
#include <sys/1394/ixl1394.h>
#include <sys/1394/adapters/hci1394.h>


/* local routines */
static void hci1394_delete_dma_desc_mem(hci1394_state_t *soft_statep,
    hci1394_idma_desc_mem_t *);
static void hci1394_delete_xfer_ctl(hci1394_xfer_ctl_t *);


/*
 * hci1394_ixl_set_start()
 *    Set up the context structure with the first ixl command to process
 *    and the first hci descriptor to execute.
 *
 *    This function assumes the current context is stopped!
 *
 *    If ixlstp IS NOT null AND is not the first compiled ixl command and
 *    is not an ixl label command, returns an error.
 *    If ixlstp IS null, uses the first compiled ixl command (ixl_firstp)
 *    in place of ixlstp.
 *
 *    If no executable xfer found along exec path from ixlstp, returns error.
 */
int
hci1394_ixl_set_start(hci1394_iso_ctxt_t *ctxtp, ixl1394_command_t *ixlstp)
{

	ixl1394_command_t  *ixl_exec_startp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_set_start_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	/* if ixl start command is null, use first compiled ixl command */
	if (ixlstp == NULL) {
		ixlstp = ctxtp->ixl_firstp;
	}

	/*
	 * if ixl start command is not first ixl compiled and is not a label,
	 * error
	 */
	if ((ixlstp != ctxtp->ixl_firstp) && (ixlstp->ixl_opcode !=
	    IXL1394_OP_LABEL)) {
		TNF_PROBE_0_DEBUG(hci1394_ixl_set_start_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (-1);
	}

	/* follow exec path to find first ixl command that's an xfer command */
	(void) hci1394_ixl_find_next_exec_xfer(ixlstp, NULL, &ixl_exec_startp);

	/*
	 * if there was one, then the hci1394_xfer_ctl structure in its
	 * compiler_privatep has the appropriate bound address
	 */
	if (ixl_exec_startp != NULL) {

		/* set up for start of context and return done */
		ctxtp->dma_mem_execp = (uint32_t)((hci1394_xfer_ctl_t *)
		    ixl_exec_startp->compiler_privatep)->dma[0].dma_bound;

		ctxtp->dma_last_time = 0;
		ctxtp->ixl_exec_depth = 0;
		ctxtp->ixl_execp = ixlstp;
		ctxtp->rem_noadv_intrs = ctxtp->max_noadv_intrs;

		TNF_PROBE_0_DEBUG(hci1394_ixl_set_start_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (0);
	}

	/* else no executable xfer command found, return error */
	TNF_PROBE_0_DEBUG(hci1394_ixl_set_start_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (1);
}
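
/*
 * Usage sketch (editorial, not part of the original driver): a hypothetical
 * caller that restarts a stopped context at a compiled IXL label, where
 * ctxtp and restart_labelp are assumed names:
 *
 *	if (hci1394_ixl_set_start(ctxtp, restart_labelp) != 0) {
 *		return (DDI_FAILURE);
 *	}
 *
 * A return of -1 means restart_labelp is neither the first compiled command
 * nor a label; a return of 1 means no executable xfer follows it.
 */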
#ifdef _KERNEL
/*
 * hci1394_ixl_reset_status()
 *    Reset all statuses in all hci descriptor blocks associated with the
 *    current linked list of compiled ixl commands.
 *
 *    This function assumes the current context is stopped!
 */
void
hci1394_ixl_reset_status(hci1394_iso_ctxt_t *ctxtp)
{
	ixl1394_command_t	*ixlcur;
	ixl1394_command_t	*ixlnext;
	hci1394_xfer_ctl_t	*xferctlp;
	uint_t			ixldepth;
	uint16_t		timestamp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_reset_status_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	ixlnext = ctxtp->ixl_firstp;

	/*
	 * Scan for the next ixl xfer start command along the ixl link path.
	 * Once an xfer command is found, clear its hci descriptor block's
	 * status.  If it is a composite ixl xfer command, clear the statuses
	 * in each of its hci descriptor blocks.
	 */
	while (ixlnext != NULL) {

		/* set current and next ixl command */
		ixlcur = ixlnext;
		ixlnext = ixlcur->next_ixlp;

		/* skip to examine next if this is not xfer start ixl command */
		if (((ixlcur->ixl_opcode & IXL1394_OPF_ISXFER) == 0) ||
		    ((ixlcur->ixl_opcode & IXL1394_OPTY_MASK) == 0)) {
			continue;
		}

		/* get control struct for this xfer start ixl command */
		xferctlp = (hci1394_xfer_ctl_t *)ixlcur->compiler_privatep;

		/* clear status in each hci descriptor block for this ixl cmd */
		ixldepth = 0;
		while (ixldepth < xferctlp->cnt) {
			(void) hci1394_ixl_check_status(
			    &xferctlp->dma[ixldepth], ixlcur->ixl_opcode,
			    &timestamp, B_TRUE);
			ixldepth++;
		}
	}

	TNF_PROBE_0_DEBUG(hci1394_ixl_reset_status_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
}
#endif
/*
 * hci1394_ixl_find_next_exec_xfer()
 *    Follows the execution path of the ixl linked list until it finds the
 *    next xfer start IXL command (which may be the current IXL command
 *    itself) or reaches the end of the IXL linked list.  Counts callback
 *    commands found along the way.  (Previously, counted store timestamp
 *    commands as well.)
 *
 *    To detect an infinite loop of label<->jump without an intervening xfer,
 *    a tolerance level of HCI1394_IXL_MAX_SEQ_JUMPS is used.  Once this
 *    number of jumps is traversed, the IXL prog is assumed to have a loop.
 *
 *    Returns DDI_SUCCESS or DDI_FAILURE.  DDI_FAILURE indicates an infinite
 *    loop of labels & jumps was detected without any intervening xfers.
 *    DDI_SUCCESS indicates that next_exec_ixlpp contains the next xfer ixlp
 *    address, or NULL indicating the end of the list was reached.  Note that
 *    DDI_FAILURE can only be returned during the IXL compilation phase, and
 *    not during ixl_update processing.
 */
int
hci1394_ixl_find_next_exec_xfer(ixl1394_command_t *ixl_start,
    uint_t *callback_cnt, ixl1394_command_t **next_exec_ixlpp)
{
	uint16_t ixlopcode;
	boolean_t xferfound;
	ixl1394_command_t *ixlp;
	int ii;

	TNF_PROBE_0_DEBUG(hci1394_ixl_find_next_exec_xfer_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	ixlp = ixl_start;
	xferfound = B_FALSE;
	ii = HCI1394_IXL_MAX_SEQ_JUMPS;
	if (callback_cnt != NULL) {
		*callback_cnt = 0;
	}

	/* continue until xfer start ixl cmd or end of ixl list found */
	while ((xferfound == B_FALSE) && (ixlp != NULL) && (ii > 0)) {

		/* get current ixl cmd opcode without update flag */
		ixlopcode = ixlp->ixl_opcode & ~IXL1394_OPF_UPDATE;

		/* if an xfer start ixl command is found, we are done */
		if (((ixlopcode & IXL1394_OPF_ISXFER) != 0) &&
		    ((ixlopcode & IXL1394_OPTY_MASK) != 0)) {
			xferfound = B_TRUE;
			continue;
		}

		/* if found jump command, adjust to follow its path */
		if (ixlopcode == IXL1394_OP_JUMP) {
			ixlp = (ixl1394_command_t *)
			    ((ixl1394_jump_t *)ixlp)->label;
			ii--;

			/* if exceeded tolerance, give up */
			if (ii == 0) {
				TNF_PROBE_1(
				    hci1394_ixl_find_next_exec_xfer_error,
				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
				    errmsg, "Infinite loop w/no xfers");
				TNF_PROBE_0_DEBUG(
				    hci1394_ixl_find_next_exec_xfer_exit,
				    HCI1394_TNF_HAL_STACK_ISOCH, "");
				return (DDI_FAILURE);
			}
			continue;
		}

		/* if current ixl command is a callback, count it */
		if ((ixlopcode == IXL1394_OP_CALLBACK) &&
		    (callback_cnt != NULL)) {
			(*callback_cnt)++;
		}

		/* advance to next linked ixl command */
		ixlp = ixlp->next_ixlp;
	}

	/* return ixl xfer start command found, if any */
	*next_exec_ixlpp = ixlp;

	TNF_PROBE_0_DEBUG(hci1394_ixl_find_next_exec_xfer_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (DDI_SUCCESS);
}
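
/*
 * Usage sketch (editorial, not part of the original driver): a hypothetical
 * caller that wants both the next xfer command and the callback count along
 * the way, where curr_ixlp is an assumed name:
 *
 *	uint_t cb_cnt;
 *	ixl1394_command_t *next_xferp;
 *
 *	if (hci1394_ixl_find_next_exec_xfer(curr_ixlp, &cb_cnt,
 *	    &next_xferp) != DDI_SUCCESS) {
 *		return (DDI_FAILURE);
 *	}
 *
 * On DDI_SUCCESS, next_xferp is either the next xfer start command or NULL
 * if the end of the IXL list was reached; cb_cnt holds the number of
 * IXL1394_OP_CALLBACK commands traversed.
 */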
#ifdef _KERNEL
/*
 * hci1394_ixl_check_status()
 *    Read the descriptor status and hdrs, clear as appropriate.
 */
int32_t
hci1394_ixl_check_status(hci1394_xfer_ctl_dma_t *dma, uint16_t ixlopcode,
    uint16_t *timestamp, boolean_t do_status_reset)
{
	uint16_t	bufsiz;
	uint16_t	hcicnt;
	uint16_t	hcirecvcnt;
	hci1394_desc_t	*hcidescp;
	off_t		hcidesc_off;
	ddi_acc_handle_t	acc_hdl;
	ddi_dma_handle_t	dma_hdl;
	uint32_t		desc_status;
	uint32_t		desc_hdr;
	int			err;

	TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	/* last dma descriptor in descriptor block from dma structure */
	hcidescp = (hci1394_desc_t *)(dma->dma_descp);
	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;
	acc_hdl  = dma->dma_buf->bi_handle;
	dma_hdl  = dma->dma_buf->bi_dma_handle;

	/* if current ixl command opcode is xmit */
	if ((ixlopcode & IXL1394_OPF_ONXMIT) != 0) {

		/* Sync the descriptor before we get the status */
		err = ddi_dma_sync(dma_hdl, hcidesc_off,
		    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
		if (err != DDI_SUCCESS) {
			TNF_PROBE_1(hci1394_ixl_check_status_error,
			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
			    "dma_sync() failed");
		}
		desc_status = ddi_get32(acc_hdl, &hcidescp->status);

		/* check if status is set in last dma descriptor in block */
		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
			/*
			 * dma descriptor status set - I/O done.
			 * If we are not to reset the status, just return;
			 * else extract the timestamp, reset the descriptor
			 * status and return "descriptor block status set".
			 */
			if (do_status_reset == B_FALSE) {
				TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
				    HCI1394_TNF_HAL_STACK_ISOCH, "");
				return (1);
			}
			*timestamp = (uint16_t)
			    ((desc_status & DESC_ST_TIMESTAMP_MASK) >>
			    DESC_ST_TIMESTAMP_SHIFT);
			ddi_put32(acc_hdl, &hcidescp->status, 0);

			/* Sync descriptor for device (status was cleared) */
			err = ddi_dma_sync(dma_hdl, hcidesc_off,
			    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORDEV);
			if (err != DDI_SUCCESS) {
				TNF_PROBE_1(hci1394_ixl_check_status_error,
				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
				    errmsg, "dma_sync() failed");
			}

			TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (1);
		}
		/* else, return dma descriptor block status not set */
		TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
		    HCI1394_TNF_HAL_STACK_ISOCH, "");
		return (0);
	}

	/* else current ixl opcode is recv */
	hcirecvcnt = 0;

	/* get count of descriptors in current dma descriptor block */
	hcicnt = dma->dma_bound & DESC_Z_MASK;
	hcidescp -= (hcicnt - 1);
	hcidesc_off = (off_t)hcidescp - (off_t)dma->dma_buf->bi_kaddr;

	/* iterate fwd through hci descriptors until end or find status set */
	while (hcicnt-- != 0) {

		/* Sync the descriptor before we get the status */
		err = ddi_dma_sync(dma_hdl, hcidesc_off,
		    hcicnt * sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORCPU);
		if (err != DDI_SUCCESS) {
			TNF_PROBE_1(hci1394_ixl_check_status_error,
			    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string, errmsg,
			    "dma_sync() failed");
		}

		desc_hdr = ddi_get32(acc_hdl, &hcidescp->hdr);

		/* get cur buffer size & accumulate potential buffer usage */
		bufsiz = (desc_hdr & DESC_HDR_REQCOUNT_MASK) >>
		    DESC_HDR_REQCOUNT_SHIFT;
		hcirecvcnt += bufsiz;

		desc_status = ddi_get32(acc_hdl, &hcidescp->status);

		/* check if status set on this descriptor block descriptor */
		if ((desc_status & DESC_XFER_ACTIVE_MASK) != 0) {
			/*
			 * dma descriptor status set - I/O done.
			 * If we are not to reset the status, just return;
			 * else extract the buffer space used, reset the
			 * descriptor status and return "descriptor block
			 * status set".
			 */
			if (do_status_reset == B_FALSE) {
				TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
				    HCI1394_TNF_HAL_STACK_ISOCH, "");
				return (1);
			}

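			/*
			 * Editorial note: the rescount field is the unused
			 * portion of this buffer, so subtracting it from the
			 * accumulated reqcount total gives the bytes actually
			 * received; that byte count is handed back through
			 * *timestamp for the recv case.
			 */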
			hcirecvcnt -= (desc_status & DESC_ST_RESCOUNT_MASK) >>
			    DESC_ST_RESCOUNT_SHIFT;
			*timestamp = hcirecvcnt;
			desc_status = (bufsiz << DESC_ST_RESCOUNT_SHIFT) &
			    DESC_ST_RESCOUNT_MASK;
			ddi_put32(acc_hdl, &hcidescp->status, desc_status);

			/* Sync descriptor for device (status was cleared) */
			err = ddi_dma_sync(dma_hdl, hcidesc_off,
			    sizeof (hci1394_desc_t), DDI_DMA_SYNC_FORDEV);
			if (err != DDI_SUCCESS) {
				TNF_PROBE_1(hci1394_ixl_check_status_error,
				    HCI1394_TNF_HAL_ERROR_ISOCH, "", tnf_string,
				    errmsg, "dma_sync() failed");
			}

			TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
			    HCI1394_TNF_HAL_STACK_ISOCH, "");
			return (1);
		} else {
			/* else, set to evaluate next descriptor. */
			hcidescp++;
			hcidesc_off = (off_t)hcidescp -
			    (off_t)dma->dma_buf->bi_kaddr;
		}
	}

	/* return input not complete status */
	TNF_PROBE_0_DEBUG(hci1394_ixl_check_status_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
	return (0);
}
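
/*
 * Usage sketch (editorial, not part of the original driver): a hypothetical
 * poll of the first descriptor block of a compiled xfer command, clearing
 * its status once the I/O is seen to be done (xfer_ixlp, xferctlp and
 * timestamp are assumed locals):
 *
 *	xferctlp = (hci1394_xfer_ctl_t *)xfer_ixlp->compiler_privatep;
 *	if (hci1394_ixl_check_status(&xferctlp->dma[0], xfer_ixlp->ixl_opcode,
 *	    &timestamp, B_TRUE) != 0) {
 *		(the descriptor block completed; timestamp holds the transmit
 *		timestamp for xmit or the received byte count for recv)
 *	}
 */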
#endif
/*
 * hci1394_ixl_cleanup()
 *    Delete all memory allocated earlier for a context's IXL program.
 */
void
hci1394_ixl_cleanup(hci1394_state_t *soft_statep, hci1394_iso_ctxt_t *ctxtp)
{
	TNF_PROBE_0_DEBUG(hci1394_ixl_cleanup_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	hci1394_delete_xfer_ctl((hci1394_xfer_ctl_t *)ctxtp->xcs_firstp);
	hci1394_delete_dma_desc_mem(soft_statep, ctxtp->dma_firstp);

	TNF_PROBE_0_DEBUG(hci1394_ixl_cleanup_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
}

/*
 * hci1394_delete_dma_desc_mem()
 *    Iterate through the linked list of dma memory descriptors, deleting the
 *    allocated dma memory blocks, then deleting each dma memory descriptor
 *    after advancing to the next one.
 */
static void
/* ARGSUSED */
hci1394_delete_dma_desc_mem(hci1394_state_t *soft_statep,
    hci1394_idma_desc_mem_t *dma_firstp)
{
	hci1394_idma_desc_mem_t *dma_next;

	TNF_PROBE_0_DEBUG(hci1394_delete_dma_desc_mem_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	while (dma_firstp != NULL) {
		dma_next = dma_firstp->dma_nextp;
#ifdef _KERNEL
		/*
		 * if this dma descriptor memory block has the handles, then
		 * free the memory.  (Note that valid handles are kept only
		 * with the most recently acquired cookie, and that each
		 * cookie is in its own idma_desc_mem_t struct.)
		 */
		if (dma_firstp->mem_handle != NULL) {
			hci1394_buf_free(&dma_firstp->mem_handle);
		}

		/* free current dma memory descriptor */
		kmem_free(dma_firstp, sizeof (hci1394_idma_desc_mem_t));
#else
		/* user mode free */
		/* free dma memory block and current dma mem descriptor */
		free(dma_firstp->mem.bi_kaddr);
		free(dma_firstp);
#endif
		/* advance to next dma memory descriptor */
		dma_firstp = dma_next;
	}
	TNF_PROBE_0_DEBUG(hci1394_delete_dma_desc_mem_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
}

/*
 * hci1394_delete_xfer_ctl()
 *    Iterate through the linked list of xfer_ctl structs, deleting the
 *    allocated memory.
 */
static void
hci1394_delete_xfer_ctl(hci1394_xfer_ctl_t *xcsp)
{
	hci1394_xfer_ctl_t *delp;

	TNF_PROBE_0_DEBUG(hci1394_delete_xfer_ctl_enter,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");

	while ((delp = xcsp) != NULL) {
		/* advance ptr to next xfer_ctl struct */
		xcsp = xcsp->ctl_nextp;

		/*
		 * delete current xfer_ctl struct and included
		 * xfer_ctl_dma structs
		 */
#ifdef _KERNEL
		kmem_free(delp,
		    sizeof (hci1394_xfer_ctl_t) +
		    sizeof (hci1394_xfer_ctl_dma_t) * (delp->cnt - 1));
#else
		free(delp);
#endif
	}
	TNF_PROBE_0_DEBUG(hci1394_delete_xfer_ctl_exit,
	    HCI1394_TNF_HAL_STACK_ISOCH, "");
}
509