/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * av1394 isochronous receive module
 */
#include <sys/1394/targets/av1394/av1394_impl.h>

/* configuration routines */
static void	av1394_ir_cleanup(av1394_ic_t *, int);
static int	av1394_ir_build_ixl(av1394_ic_t *);
static void	av1394_ir_ixl_label_init(av1394_ir_ixl_data_t *,
		ixl1394_command_t *);
static void	av1394_ir_ixl_buf_init(av1394_ic_t *, ixl1394_xfer_buf_t *,
		av1394_isoch_seg_t *, off_t, uint64_t, uint16_t,
		ixl1394_command_t *);
static void	av1394_ir_ixl_cb_init(av1394_ic_t *, av1394_ir_ixl_data_t *,
		int);
static void	av1394_ir_ixl_jump_init(av1394_ic_t *, av1394_ir_ixl_data_t *,
		int);
static void	av1394_ir_destroy_ixl(av1394_ic_t *);
static int	av1394_ir_alloc_isoch_dma(av1394_ic_t *);
static void	av1394_ir_free_isoch_dma(av1394_ic_t *);
static void	av1394_ir_dma_sync_frames(av1394_ic_t *, int, int);

/* callbacks */
static void	av1394_ir_ixl_frame_cb(opaque_t, struct ixl1394_callback *);
static void	av1394_ir_overflow_resume(av1394_ic_t *icp);
static void	av1394_ir_dma_stopped_cb(t1394_isoch_dma_handle_t,
		opaque_t, id1394_isoch_dma_stopped_t);

/* data transfer routines */
static int	av1394_ir_add_frames(av1394_ic_t *, int, int);
static int	av1394_ir_wait_frames(av1394_ic_t *, int *, int *);
static int	av1394_ir_copyout(av1394_ic_t *, struct uio *, int *);
static void	av1394_ir_zero_pkts(av1394_ic_t *, int, int);

/* values complementary to the hi & lo watermarks (modulo number of frames) */
int av1394_ir_hiwat_sub = 2;
int av1394_ir_lowat_sub = 3;
int av1394_ir_dump_ixl = 0;	/* if set, dump the IXL chain once built */

#define	AV1394_TNF_ENTER(func)	\
	TNF_PROBE_0_DEBUG(func##_enter, AV1394_TNF_ISOCH_STACK, "");

#define	AV1394_TNF_EXIT(func)	\
	TNF_PROBE_0_DEBUG(func##_exit, AV1394_TNF_ISOCH_STACK, "");

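/*
 * av1394_ir_init()
 *    Initialize the isoch receive context: allocate the frame data pool,
 *    set up its DMA bindings, build the IXL chain and allocate isoch DMA
 *    resources. Returns zero on success, or EINVAL with *error set to an
 *    IEC61883 error code on failure.
 */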
int
av1394_ir_init(av1394_ic_t *icp, int *error)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	av1394_isoch_pool_t *pool = &irp->ir_data_pool;
	int		nframes;

	AV1394_TNF_ENTER(av1394_ir_init);

	nframes = av1394_ic_alloc_pool(pool, icp->ic_framesz, icp->ic_nframes,
	    AV1394_IR_NFRAMES_MIN);
	if (nframes == 0) {
		*error = IEC61883_ERR_NOMEM;
		AV1394_TNF_EXIT(av1394_ir_init);
		return (EINVAL);
	}
	mutex_enter(&icp->ic_mutex);
	icp->ic_nframes = nframes;
	irp->ir_hiwat = nframes - av1394_ir_hiwat_sub;
	irp->ir_lowat = nframes - av1394_ir_lowat_sub;

	if (av1394_ic_dma_setup(icp, pool) != DDI_SUCCESS) {
		mutex_exit(&icp->ic_mutex);
		*error = IEC61883_ERR_NOMEM;
		av1394_ir_cleanup(icp, 1);
		AV1394_TNF_EXIT(av1394_ir_init);
		return (EINVAL);
	}

	if (av1394_ir_build_ixl(icp) != DDI_SUCCESS) {
		mutex_exit(&icp->ic_mutex);
		*error = IEC61883_ERR_NOMEM;
		av1394_ir_cleanup(icp, 2);
		AV1394_TNF_EXIT(av1394_ir_init);
		return (EINVAL);
	}
	mutex_exit(&icp->ic_mutex);

	if (av1394_ir_alloc_isoch_dma(icp) != DDI_SUCCESS) {
		*error = IEC61883_ERR_NOMEM;
		av1394_ir_cleanup(icp, 3);
		AV1394_TNF_EXIT(av1394_ir_init);
		return (EINVAL);
	}

	AV1394_TNF_EXIT(av1394_ir_init);
	return (0);
}

void
av1394_ir_fini(av1394_ic_t *icp)
{
	AV1394_TNF_ENTER(av1394_ir_fini);

	av1394_ir_cleanup(icp, AV1394_CLEANUP_LEVEL_MAX);

	AV1394_TNF_EXIT(av1394_ir_fini);
}

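/*
 * av1394_ir_start()
 *    Reset the frame ring to the all-empty state and start the isoch DMA
 *    engine. Does nothing if the channel is not idle.
 */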
int
av1394_ir_start(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;
	av1394_ir_t	*irp = &icp->ic_ir;
	id1394_isoch_dma_ctrlinfo_t idma_ctrlinfo = { 0 };
	int		result;
	int		err;
	int		ret = 0;

	AV1394_TNF_ENTER(av1394_ir_start);

	mutex_enter(&icp->ic_mutex);
	if (icp->ic_state != AV1394_IC_IDLE) {
		mutex_exit(&icp->ic_mutex);
		return (0);
	}

	irp->ir_first_full = 0;
	irp->ir_last_empty = icp->ic_nframes - 1;
	irp->ir_nfull = 0;
	irp->ir_nempty = icp->ic_nframes;
	irp->ir_read_cnt = 0;
	mutex_exit(&icp->ic_mutex);

	err = t1394_start_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl,
	    &idma_ctrlinfo, 0, &result);
	if (err == DDI_SUCCESS) {
		mutex_enter(&icp->ic_mutex);
		icp->ic_state = AV1394_IC_DMA;
		mutex_exit(&icp->ic_mutex);
	} else {
		TNF_PROBE_1(av1394_ir_start_error, AV1394_TNF_ISOCH_ERROR, "",
		    tnf_int, result, result);
		ret = EIO;
	}

	AV1394_TNF_EXIT(av1394_ir_start);
	return (ret);
}

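/*
 * av1394_ir_stop()
 *    Stop the isoch DMA engine if it is running and mark the channel idle.
 */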
int
av1394_ir_stop(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;

	AV1394_TNF_ENTER(av1394_ir_stop);

	mutex_enter(&icp->ic_mutex);
	if (icp->ic_state != AV1394_IC_IDLE) {
		mutex_exit(&icp->ic_mutex);
		t1394_stop_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl, 0);
		mutex_enter(&icp->ic_mutex);
		icp->ic_state = AV1394_IC_IDLE;
	}
	mutex_exit(&icp->ic_mutex);

	AV1394_TNF_EXIT(av1394_ir_stop);
	return (0);
}

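/*
 * av1394_ir_recv()
 *    Backend for the IEC61883_RECV ioctl: return a range of consumed
 *    (empty) frames to the ring, if any, then wait for received (full)
 *    frames and report them to the caller.
 */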
int
av1394_ir_recv(av1394_ic_t *icp, iec61883_recv_t *recv)
{
	int		ret = 0;
	int		idx, cnt;

	idx = recv->rx_xfer.xf_empty_idx;
	cnt = recv->rx_xfer.xf_empty_cnt;

	/* check arguments */
	if ((idx < 0) || (idx >= icp->ic_nframes) ||
	    (cnt < 0) || (cnt > icp->ic_nframes)) {
		TNF_PROBE_2(av1394_ir_recv_error_args, AV1394_TNF_ISOCH_ERROR,
		    "", tnf_int, idx, idx, tnf_int, cnt, cnt);
		return (EINVAL);
	}

	mutex_enter(&icp->ic_mutex);
	if (cnt > 0) {
		/* add empty frames to the pool */
		if ((ret = av1394_ir_add_frames(icp, idx, cnt)) != 0) {
			mutex_exit(&icp->ic_mutex);
			return (ret);
		}
	}

	/* wait for new frames to arrive */
	ret = av1394_ir_wait_frames(icp,
	    &recv->rx_xfer.xf_full_idx, &recv->rx_xfer.xf_full_cnt);
	mutex_exit(&icp->ic_mutex);

	return (ret);
}

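/*
 * av1394_ir_read()
 *    Backend for read(2): copy received data out to the user's buffer,
 *    returning drained frames to the ring as they are consumed.
 */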
int
av1394_ir_read(av1394_ic_t *icp, struct uio *uiop)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	int		ret = 0;
	int		empty_cnt;

	AV1394_TNF_ENTER(av1394_ir_read);

	mutex_enter(&icp->ic_mutex);
	while (uiop->uio_resid) {
		/* wait for full frames, if necessary */
		if (irp->ir_read_cnt == 0) {
			irp->ir_read_off = 0;
			ret = av1394_ir_wait_frames(icp,
			    &irp->ir_read_idx, &irp->ir_read_cnt);
			if (ret != 0) {
				mutex_exit(&icp->ic_mutex);
				AV1394_TNF_EXIT(av1394_ir_read);
				return (ret);
			}
		}

		/* copyout the data */
		ret = av1394_ir_copyout(icp, uiop, &empty_cnt);

		/* return freed frames to the pool */
		if (empty_cnt > 0) {
			int	err;

			av1394_ir_zero_pkts(icp, irp->ir_read_idx, empty_cnt);
			err = av1394_ir_add_frames(icp, irp->ir_read_idx,
			    empty_cnt);
			if (ret == 0)
				ret = err;
			irp->ir_read_idx += empty_cnt;
			irp->ir_read_idx %= icp->ic_nframes;
			irp->ir_read_cnt -= empty_cnt;
		}

		/* stop on error instead of retrying the copyout forever */
		if (ret != 0)
			break;
	}
	mutex_exit(&icp->ic_mutex);

	AV1394_TNF_EXIT(av1394_ir_read);
	return (ret);
}

/*
 *
 * --- configuration routines
 *
 */
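/*
 * av1394_ir_cleanup()
 *    Undo initialization up to the given level: level 1 frees the frame
 *    pool, level 2 also tears down the DMA bindings, level 3 also destroys
 *    the IXL chain, and AV1394_CLEANUP_LEVEL_MAX additionally frees the
 *    isoch DMA resources.
 */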
static void
av1394_ir_cleanup(av1394_ic_t *icp, int level)
{
	av1394_isoch_pool_t *pool = &icp->ic_ir.ir_data_pool;

	ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));

	switch (level) {
	default:
		av1394_ir_free_isoch_dma(icp);
		/* FALLTHRU */
	case 3:
		av1394_ir_destroy_ixl(icp);
		/* FALLTHRU */
	case 2:
		av1394_ic_dma_cleanup(icp, pool);
		/* FALLTHRU */
	case 1:
		av1394_ic_free_pool(pool);
		/* FALLTHRU */
	}
}

/*
 * av1394_ir_build_ixl()
 *    Build an IXL chain to receive CIP data. The smallest instance of data
 *    that can be received is a packet, typically 512 bytes. Frames consist
 *    of a number of packets, typically 250-300. Packet size, frame size and
 *    the number of frames allocated are set by a user process. The received
 *    data is made available to the user process in full frames, hence there
 *    is an IXL callback at the end of each frame. A sequence of IXL commands
 *    that receives one frame is further referred to as an IXL data block.
 *
 *    During normal operation, frames are in a circular list and the IXL
 *    chain does not change. When the user process does not keep up with the
 *    data flow and there are too few empty frames left, the jump following
 *    the last empty frame is dynamically updated to point to NULL --
 *    otherwise the first full frame would be overwritten. When IXL execution
 *    reaches the nulled jump, it just waits until the driver updates it
 *    again or stops the transfer. Once the user process frees up enough
 *    frames, the jump is restored and the transfer continues. The user
 *    process can detect dropped packets using the continuity counters
 *    embedded in the data.
 *
 *    Because the RECV_BUF buffer size is limited to AV1394_IXL_BUFSZ_MAX,
 *    and due to isoch pool segmentation, the number of RECV_BUF commands per
 *    IXL data block depends on frame size. Also, to simplify calculations,
 *    we consider a sequence of RECV_BUF commands to consist of two parts:
 *    zero or more equal-sized RECV_BUF commands followed by one "tail"
 *    RECV_BUF command, whose size may differ from the others.
 *
 *    Schematically the IXL chain looks like this:
 *
 *    ...
 *    LABEL N;
 *    RECV_BUF(buf)
 *    ...
 *    RECV_BUF(tail)
 *    CALLBACK(frame done);
 *    JUMP_U(LABEL (N+1)%nframes or NULL);
 *    ...
 */
static int
av1394_ir_build_ixl(av1394_ic_t *icp)
{
	av1394_ir_t		*irp = &icp->ic_ir;
	av1394_isoch_pool_t	*pool = &irp->ir_data_pool;
	int			i;	/* segment index */
	int			j;
	int			fi;	/* frame index */
	int			bi;	/* buffer index */

	AV1394_TNF_ENTER(av1394_ir_build_ixl);

	/* allocate space for IXL data blocks */
	irp->ir_ixl_data = kmem_zalloc(icp->ic_nframes *
	    sizeof (av1394_ir_ixl_data_t), KM_SLEEP);

	/*
	 * We have a bunch of segments, and each is divided into cookies.  We
	 * need to cover the segments with RECV_BUFs such that they
	 *   - don't span cookies
	 *   - don't span frames
	 *   - are at most AV1394_IXL_BUFSZ_MAX
	 *
	 * The straightforward algorithm is to start from the beginning, find
	 * the next lowest frame or cookie boundary, and either make a buf for
	 * it if it is smaller than AV1394_IXL_BUFSZ_MAX, or make multiple
	 * bufs for it as with av1394_ic_ixl_seg_decomp().  And repeat.
	 */

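	/* first pass: count how many RECV_BUF commands will be needed */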
	irp->ir_ixl_nbufs = 0;
	for (i = 0; i < pool->ip_nsegs; ++i) {
		av1394_isoch_seg_t *isp = &pool->ip_seg[i];
		size_t dummy1, dummy2;

		uint_t off = 0;
		uint_t end;

		uint_t frame_end = icp->ic_framesz;
		int ci = 0;
		uint_t cookie_end = isp->is_dma_cookie[ci].dmac_size;

		for (;;) {
			end = min(frame_end, cookie_end);

			if (end - off <= AV1394_IXL_BUFSZ_MAX) {
				++irp->ir_ixl_nbufs;
			} else {
				irp->ir_ixl_nbufs += av1394_ic_ixl_seg_decomp(
				    end - off, icp->ic_pktsz, &dummy1, &dummy2);
				/* count the tail buffer */
				++irp->ir_ixl_nbufs;
			}

			off = end;
			if (off >= isp->is_size)
				break;

			if (off == frame_end)
				frame_end += icp->ic_framesz;
			if (off == cookie_end) {
				++ci;
				cookie_end += isp->is_dma_cookie[ci].dmac_size;
			}
		}
	}

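	/* now that the number of RECV_BUF commands is known, allocate them */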
	irp->ir_ixl_buf = kmem_zalloc(irp->ir_ixl_nbufs *
	    sizeof (ixl1394_xfer_buf_t), KM_SLEEP);

	fi = 0;
	bi = 0;

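	/*
	 * second pass: walk the segments again, this time filling in the
	 * LABEL, RECV_BUF, CALLBACK and JUMP commands for each frame
	 */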
	for (i = 0; i < pool->ip_nsegs; ++i) {
		av1394_isoch_seg_t *isp = &pool->ip_seg[i];

		uint_t off = 0;		/* offset into segment */
		uint_t end;
		uint_t coff = 0;	/* offset into cookie */

		uint_t frame_end = icp->ic_framesz;
		int ci = 0;
		uint_t cookie_end = isp->is_dma_cookie[ci].dmac_size;

		ixl1394_command_t *nextp;

		av1394_ir_ixl_label_init(&irp->ir_ixl_data[fi],
		    (ixl1394_command_t *)&irp->ir_ixl_buf[bi]);

		for (;;) {
			end = min(frame_end, cookie_end);

			if (end == frame_end)
				nextp = (ixl1394_command_t *)
				    &irp->ir_ixl_data[fi].rd_cb;
			else
				nextp = (ixl1394_command_t *)
				    &irp->ir_ixl_buf[bi + 1];

			if (end - off <= AV1394_IXL_BUFSZ_MAX) {
				av1394_ir_ixl_buf_init(icp,
				    &irp->ir_ixl_buf[bi], isp, off,
				    isp->is_dma_cookie[ci].dmac_laddress + coff,
				    end - off, nextp);
				coff += end - off;
				off = end;
				++bi;
			} else {
				size_t reg, tail;
				uint_t nbufs;

				nbufs = av1394_ic_ixl_seg_decomp(end - off,
				    icp->ic_pktsz, &reg, &tail);

				for (j = 0; j < nbufs; ++j) {
					av1394_ir_ixl_buf_init(icp,
					    &irp->ir_ixl_buf[bi], isp, off,
					    isp->is_dma_cookie[ci].
					    dmac_laddress + coff, reg,
					    (ixl1394_command_t *)
					    &irp->ir_ixl_buf[bi + 1]);
					++bi;
					off += reg;
					coff += reg;
				}

				av1394_ir_ixl_buf_init(icp,
				    &irp->ir_ixl_buf[bi], isp, off,
				    isp->is_dma_cookie[ci].dmac_laddress + coff,
				    tail, nextp);
				++bi;
				off += tail;
				coff += tail;
			}

			ASSERT((off == frame_end) || (off == cookie_end));

			if (off >= isp->is_size)
				break;

			if (off == frame_end) {
				av1394_ir_ixl_cb_init(icp,
				    &irp->ir_ixl_data[fi], fi);
				av1394_ir_ixl_jump_init(icp,
				    &irp->ir_ixl_data[fi], fi);
				++fi;
				frame_end += icp->ic_framesz;
				av1394_ir_ixl_label_init(&irp->ir_ixl_data[fi],
				    (ixl1394_command_t *)&irp->ir_ixl_buf[bi]);
			}

			if (off == cookie_end) {
				++ci;
				cookie_end += isp->is_dma_cookie[ci].dmac_size;
				coff = 0;
			}
		}

		av1394_ir_ixl_cb_init(icp, &irp->ir_ixl_data[fi], fi);
		av1394_ir_ixl_jump_init(icp, &irp->ir_ixl_data[fi], fi);
		++fi;
	}

	ASSERT(fi == icp->ic_nframes);
	ASSERT(bi == irp->ir_ixl_nbufs);

	irp->ir_ixlp = (ixl1394_command_t *)irp->ir_ixl_data;

	if (av1394_ir_dump_ixl) {
		av1394_ic_ixl_dump(irp->ir_ixlp);
	}

	AV1394_TNF_EXIT(av1394_ir_build_ixl);
	return (DDI_SUCCESS);
}

static void
av1394_ir_ixl_label_init(av1394_ir_ixl_data_t *dp, ixl1394_command_t *nextp)
{
	dp->rd_label.ixl_opcode = IXL1394_OP_LABEL;
	dp->rd_label.next_ixlp	= nextp;
}

static void
av1394_ir_ixl_buf_init(av1394_ic_t *icp, ixl1394_xfer_buf_t *buf,
	av1394_isoch_seg_t *isp, off_t offset, uint64_t addr, uint16_t size,
	ixl1394_command_t *nextp)
{
	buf->ixl_opcode = IXL1394_OP_RECV_BUF;
	buf->size = size;
	buf->pkt_size = icp->ic_pktsz;
	buf->ixl_buf._dmac_ll = addr;
	buf->mem_bufp = isp->is_kaddr + offset;
	buf->next_ixlp = nextp;
}

/*ARGSUSED*/
static void
av1394_ir_ixl_cb_init(av1394_ic_t *icp, av1394_ir_ixl_data_t *dp, int i)
{
	dp->rd_cb.ixl_opcode = IXL1394_OP_CALLBACK;
	dp->rd_cb.callback = av1394_ir_ixl_frame_cb;
	dp->rd_cb.callback_arg = (void *)(intptr_t)i;
	dp->rd_cb.next_ixlp = (ixl1394_command_t *)&dp->rd_jump;
}

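/*
 * av1394_ir_ixl_jump_init()
 *    Point frame i's jump at the label of frame (i + 1) % nframes. The last
 *    frame's jump wraps around to the first label, but its next_ixlp is set
 *    to NULL so that the compiled IXL chain terminates.
 */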
static void
av1394_ir_ixl_jump_init(av1394_ic_t *icp, av1394_ir_ixl_data_t *dp, int i)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	int		next_idx;
	ixl1394_command_t *jump_cmd;

	next_idx = (i + 1) % icp->ic_nframes;
	jump_cmd = (ixl1394_command_t *)&irp->ir_ixl_data[next_idx];

	dp->rd_jump.ixl_opcode	= IXL1394_OP_JUMP_U;
	dp->rd_jump.label = jump_cmd;
	dp->rd_jump.next_ixlp = (next_idx != 0) ? jump_cmd : NULL;
}

static void
av1394_ir_destroy_ixl(av1394_ic_t *icp)
{
	av1394_ir_t		*irp = &icp->ic_ir;

	AV1394_TNF_ENTER(av1394_ir_destroy_ixl);

	mutex_enter(&icp->ic_mutex);
	kmem_free(irp->ir_ixl_buf,
	    irp->ir_ixl_nbufs * sizeof (ixl1394_xfer_buf_t));
	kmem_free(irp->ir_ixl_data,
	    icp->ic_nframes * sizeof (av1394_ir_ixl_data_t));

	irp->ir_ixlp = NULL;
	irp->ir_ixl_buf = NULL;
	irp->ir_ixl_data = NULL;
	mutex_exit(&icp->ic_mutex);

	AV1394_TNF_EXIT(av1394_ir_destroy_ixl);
}

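/*
 * av1394_ir_alloc_isoch_dma()
 *    Hand the IXL chain to the 1394 framework and allocate isoch DMA
 *    resources for the channel in packet (ID1394_LISTEN_PKT_MODE) mode.
 */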
static int
av1394_ir_alloc_isoch_dma(av1394_ic_t *icp)
{
	av1394_inst_t		*avp = icp->ic_avp;
	av1394_ir_t		*irp = &icp->ic_ir;
	id1394_isoch_dmainfo_t	di;
	int			result;
	int			ret;

	AV1394_TNF_ENTER(av1394_ir_alloc_isoch_dma);

	di.ixlp = irp->ir_ixlp;
	di.channel_num = icp->ic_num;
	di.global_callback_arg = icp;
	di.idma_options = ID1394_LISTEN_PKT_MODE;
	di.isoch_dma_stopped = av1394_ir_dma_stopped_cb;
	di.idma_evt_arg = icp;

	if ((ret = t1394_alloc_isoch_dma(avp->av_t1394_hdl, &di, 0,
	    &icp->ic_isoch_hdl, &result)) != DDI_SUCCESS) {
		TNF_PROBE_1(av1394_ir_alloc_isoch_dma_error,
		    AV1394_TNF_ISOCH_ERROR, "", tnf_int, result, result);
	}

	AV1394_TNF_EXIT(av1394_ir_alloc_isoch_dma);
	return (ret);
}

static void
av1394_ir_free_isoch_dma(av1394_ic_t *icp)
{
	av1394_inst_t		*avp = icp->ic_avp;

	AV1394_TNF_ENTER(av1394_ir_free_isoch_rsrc);

	t1394_free_isoch_dma(avp->av_t1394_hdl, 0, &icp->ic_isoch_hdl);

	AV1394_TNF_EXIT(av1394_ir_free_isoch_rsrc);
}

static void
av1394_ir_dma_sync_frames(av1394_ic_t *icp, int idx, int cnt)
{
	av1394_ic_dma_sync_frames(icp, idx, cnt,
	    &icp->ic_ir.ir_data_pool, DDI_DMA_SYNC_FORCPU);
}

/*
 *
 * --- callbacks
 *
 */
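/*
 * av1394_ir_ixl_frame_cb()
 *    Per-frame IXL callback: account for one more full frame, wake up
 *    waiting readers and trigger the overflow soft interrupt once the
 *    number of full frames reaches the high watermark.
 */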
/*ARGSUSED*/
static void
av1394_ir_ixl_frame_cb(opaque_t arg, struct ixl1394_callback *cb)
{
	av1394_ic_t	*icp = arg;
	av1394_isoch_t	*ip = &icp->ic_avp->av_i;
	av1394_ir_t	*irp = &icp->ic_ir;

	AV1394_TNF_ENTER(av1394_ir_ixl_frame_cb);

	mutex_enter(&ip->i_mutex);
	mutex_enter(&icp->ic_mutex);
	if (irp->ir_nfull < icp->ic_nframes) {
		irp->ir_nfull++;
		irp->ir_nempty--;
		cv_broadcast(&icp->ic_xfer_cv);

		/*
		 * signal the overflow condition early, so we get enough
		 * time to handle it before old data is overwritten
		 */
		if (irp->ir_nfull >= irp->ir_hiwat) {
			av1394_ic_trigger_softintr(icp, icp->ic_num,
			    AV1394_PREQ_IR_OVERFLOW);
		}
	}
	mutex_exit(&icp->ic_mutex);
	mutex_exit(&ip->i_mutex);

	AV1394_TNF_EXIT(av1394_ir_ixl_frame_cb);
}

/*
 * received data overflow
 */
void
av1394_ir_overflow(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;
	av1394_ir_t	*irp = &icp->ic_ir;
	int		idx;
	ixl1394_jump_t	*old_jmp;
	ixl1394_jump_t	new_jmp;
	id1394_isoch_dma_updateinfo_t update_info;
	int		err;
	int		result;

	AV1394_TNF_ENTER(av1394_ir_overflow);

	/*
	 * in the circular IXL chain, an overflow means overwriting the least
	 * recent data. to avoid that, we suspend the transfer by NULL'ing
	 * the jump in the last empty frame's IXL block until the user
	 * process frees up some frames.
	 */
	idx = irp->ir_last_empty;

	old_jmp = &irp->ir_ixl_data[idx].rd_jump;

	new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
	new_jmp.label = NULL;
	new_jmp.next_ixlp = NULL;

	update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
	update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
	update_info.ixl_count = 1;

	mutex_exit(&icp->ic_mutex);
	err = t1394_update_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl,
	    &update_info, 0, &result);
	mutex_enter(&icp->ic_mutex);

	if (err == DDI_SUCCESS) {
		irp->ir_overflow_idx = idx;
		icp->ic_state = AV1394_IC_SUSPENDED;
	} else {
		TNF_PROBE_2(av1394_ir_overflow_error_update,
		    AV1394_TNF_ISOCH_ERROR, "", tnf_int, err, err,
		    tnf_int, result, result);
	}

	AV1394_TNF_EXIT(av1394_ir_overflow);
}

/*
 * restore from overflow condition
 */
static void
av1394_ir_overflow_resume(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;
	av1394_ir_t	*irp = &icp->ic_ir;
	int		idx, next_idx;
	ixl1394_jump_t	*old_jmp;
	ixl1394_jump_t	new_jmp;
	id1394_isoch_dma_updateinfo_t update_info;
	int		err;
	int		result;

	AV1394_TNF_ENTER(av1394_ir_overflow_resume);

	/*
	 * restore the jump command we NULL'ed in av1394_ir_overflow()
	 */
	idx = irp->ir_overflow_idx;
	next_idx = (idx + 1) % icp->ic_nframes;

	old_jmp = &irp->ir_ixl_data[idx].rd_jump;

	new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
	new_jmp.label = (ixl1394_command_t *)&irp->ir_ixl_data[next_idx];
	new_jmp.next_ixlp = NULL;

	update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
	update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
	update_info.ixl_count = 1;

	mutex_exit(&icp->ic_mutex);
	err = t1394_update_isoch_dma(avp->av_t1394_hdl,
	    icp->ic_isoch_hdl, &update_info, 0, &result);
	mutex_enter(&icp->ic_mutex);

	if (err == DDI_SUCCESS) {
		icp->ic_state = AV1394_IC_DMA;
	} else {
		TNF_PROBE_2(av1394_ir_overflow_resume_error_update,
		    AV1394_TNF_ISOCH_ERROR, "", tnf_int, err, err,
		    tnf_int, result, result);
	}

	AV1394_TNF_EXIT(av1394_ir_overflow_resume);
}

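/*
 * av1394_ir_dma_stopped_cb()
 *    Called by the 1394 framework when the isoch DMA engine stops;
 *    mark the channel idle.
 */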
/*ARGSUSED*/
static void
av1394_ir_dma_stopped_cb(t1394_isoch_dma_handle_t t1394_idma_hdl,
	opaque_t idma_evt_arg, id1394_isoch_dma_stopped_t status)
{
	av1394_ic_t	*icp = idma_evt_arg;

	AV1394_TNF_ENTER(av1394_ir_dma_stopped_cb);

	mutex_enter(&icp->ic_mutex);
	icp->ic_state = AV1394_IC_IDLE;
	mutex_exit(&icp->ic_mutex);

	AV1394_TNF_EXIT(av1394_ir_dma_stopped_cb);
}

/*
 *
 * --- data transfer routines
 *
 * av1394_ir_add_frames()
 *    Add empty frames to the pool.
 */
static int
av1394_ir_add_frames(av1394_ic_t *icp, int idx, int cnt)
{
	av1394_ir_t	*irp = &icp->ic_ir;

	/* can only add to the tail */
	if (idx != ((irp->ir_last_empty + 1) % icp->ic_nframes)) {
		TNF_PROBE_1(av1394_ir_add_frames_error,
		    AV1394_TNF_ISOCH_ERROR, "", tnf_int, idx, idx);
		return (EINVAL);
	}

	/* turn full frames into empty ones */
	irp->ir_nfull -= cnt;
	irp->ir_first_full = (irp->ir_first_full + cnt) % icp->ic_nframes;
	irp->ir_nempty += cnt;
	irp->ir_last_empty = (irp->ir_last_empty + cnt) % icp->ic_nframes;
	ASSERT((irp->ir_nfull >= 0) && (irp->ir_nempty <= icp->ic_nframes));

	/* if suspended due to overflow, check if we can resume */
	if ((icp->ic_state == AV1394_IC_SUSPENDED) &&
	    (irp->ir_nempty >= irp->ir_lowat)) {
		av1394_ir_overflow_resume(icp);
	}

	return (0);
}

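/*
 * av1394_ir_wait_frames()
 *    Wait until at least one full frame is available, then return the index
 *    of the first full frame and the number of full frames. Returns EINTR
 *    if the wait is interrupted by a signal.
 */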
static int
av1394_ir_wait_frames(av1394_ic_t *icp, int *idx, int *cnt)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	int		ret = 0;

	while (irp->ir_nfull == 0) {
		if (cv_wait_sig(&icp->ic_xfer_cv, &icp->ic_mutex) <= 0) {
			ret = EINTR;
			break;
		}
	}
	if (irp->ir_nfull > 0) {
		*idx = irp->ir_first_full;
		*cnt = irp->ir_nfull;
		av1394_ir_dma_sync_frames(icp, *idx, *cnt);
		ret = 0;
	}
	return (ret);
}

/*
 * copy the data out to the user buffer, adjusting for the CIP data block
 * format and skipping empty CIPs where possible
 */
static int
av1394_ir_copyout(av1394_ic_t *icp, struct uio *uiop, int *empty_cnt)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	av1394_isoch_seg_t *seg = irp->ir_data_pool.ip_seg;
	int		idx = irp->ir_read_idx;
	int		cnt = irp->ir_read_cnt;
	int		pktsz = icp->ic_pktsz;
	int		bs;		/* data block size */
	caddr_t		kaddr_begin, kaddr;
	int		pkt_off;	/* offset into current packet */
	int		len;
	int		frame_resid;	/* bytes left in the current frame */
	int		ret = 0;

	*empty_cnt = 0;

	/*
	 * DBS -> block size: the DBS field of the CIP header gives the data
	 * block size in quadlets; convert it to bytes and add the CIP header
	 * size. Fall back to the packet size if the value is implausible.
	 */
	bs = *(uchar_t *)(seg[idx].is_kaddr + 1) * 4 + AV1394_CIPSZ;
	if ((bs > pktsz) || (bs < AV1394_CIPSZ + 8)) {
		bs = pktsz;
	}

	while ((cnt > 0) && (uiop->uio_resid > 0) && (ret == 0)) {
		kaddr = kaddr_begin = seg[idx].is_kaddr + irp->ir_read_off;
		frame_resid = icp->ic_framesz - irp->ir_read_off;

		mutex_exit(&icp->ic_mutex);
		/* copyout data blocks, skipping empty CIPs */
		while ((uiop->uio_resid > 0) && (frame_resid > 0)) {
			pkt_off = (uintptr_t)kaddr % pktsz;
			/*
			 * the quadlet following the CIP header can't be zero
			 * unless the packet is empty
			 */
			if ((pkt_off == 0) &&
			    (*(uint32_t *)(kaddr + AV1394_CIPSZ) == 0)) {
				kaddr += pktsz;
				frame_resid -= pktsz;
				continue;
			}

			len = bs - pkt_off;
			if (len > uiop->uio_resid) {
				len = uiop->uio_resid;
			}
			if (len > frame_resid) {
				len = frame_resid;
			}
			if ((ret = uiomove(kaddr, len, UIO_READ, uiop)) != 0) {
				break;
			}

			if (pkt_off + len == bs) {
				kaddr += pktsz - pkt_off;
				frame_resid -= pktsz - pkt_off;
			} else {
				kaddr += len;
				frame_resid -= len;
			}
		}
		mutex_enter(&icp->ic_mutex);

		if (frame_resid > 0) {
			irp->ir_read_off = kaddr - kaddr_begin;
		} else {
			irp->ir_read_off = 0;
			idx = (idx + 1) % icp->ic_nframes;
			cnt--;
			(*empty_cnt)++;
		}
	}

	return (ret);
}

/*
 * zero a quadlet in each packet so we can recognize empty CIPs
 */
static void
av1394_ir_zero_pkts(av1394_ic_t *icp, int idx, int cnt)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	av1394_isoch_seg_t *seg = irp->ir_data_pool.ip_seg;
	caddr_t		kaddr, kaddr_end;
	int		pktsz = icp->ic_pktsz;
	int		i;

	for (i = cnt; i > 0; i--) {
		kaddr = seg[idx].is_kaddr + AV1394_CIPSZ;
		kaddr_end = seg[idx].is_kaddr + icp->ic_framesz;
		do {
			*(uint32_t *)kaddr = 0;
			kaddr += pktsz;
		} while (kaddr < kaddr_end);

		idx = (idx + 1) % icp->ic_nframes;
	}
}
935