/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * av1394 isochronous receive module
 */
#include <sys/1394/targets/av1394/av1394_impl.h>

/* configuration routines */
static void	av1394_ir_cleanup(av1394_ic_t *, int);
static int	av1394_ir_build_ixl(av1394_ic_t *);
static void	av1394_ir_ixl_label_init(av1394_ir_ixl_data_t *,
		ixl1394_command_t *);
static void	av1394_ir_ixl_buf_init(av1394_ic_t *, ixl1394_xfer_buf_t *,
		av1394_isoch_seg_t *, off_t, uint64_t, uint16_t,
		ixl1394_command_t *);
static void	av1394_ir_ixl_cb_init(av1394_ic_t *, av1394_ir_ixl_data_t *,
		int);
static void	av1394_ir_ixl_jump_init(av1394_ic_t *, av1394_ir_ixl_data_t *,
		int);
static void	av1394_ir_destroy_ixl(av1394_ic_t *);
static int	av1394_ir_alloc_isoch_dma(av1394_ic_t *);
static void	av1394_ir_free_isoch_dma(av1394_ic_t *);
static void	av1394_ir_dma_sync_frames(av1394_ic_t *, int, int);

/* callbacks */
static void	av1394_ir_ixl_frame_cb(opaque_t, struct ixl1394_callback *);
static void	av1394_ir_overflow_resume(av1394_ic_t *icp);
static void	av1394_ir_dma_stopped_cb(t1394_isoch_dma_handle_t,
		opaque_t, id1394_isoch_dma_stopped_t);

/* data transfer routines */
static int	av1394_ir_add_frames(av1394_ic_t *, int, int);
static int	av1394_ir_wait_frames(av1394_ic_t *, int *, int *);
static int	av1394_ir_copyout(av1394_ic_t *, struct uio *, int *);
static void	av1394_ir_zero_pkts(av1394_ic_t *, int, int);

/*
 * Tunables.
 *
 * value complementary to hi & lo watermarks (modulo number of frames):
 * the actual watermarks are computed in av1394_ir_init() as
 * nframes - av1394_ir_{hiwat,lowat}_sub.
 */
int av1394_ir_hiwat_sub = 2;
int av1394_ir_lowat_sub = 3;
/* when non-zero, dump the constructed IXL chain for debugging */
int av1394_ir_dump_ixl = 0;

/*
 * av1394_ir_init()
 *    Initialize the receive side of a channel: allocate the frame data
 *    pool, compute the hi/lo watermarks, bind the pool for DMA, build the
 *    IXL chain and allocate the isoch DMA resource.
 *
 *    Returns 0 on success.  On failure sets *error to an IEC61883 error
 *    code (IEC61883_ERR_NOMEM), undoes any partial setup via
 *    av1394_ir_cleanup() at the appropriate level, and returns EINVAL.
 */
int
av1394_ir_init(av1394_ic_t *icp, int *error)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	av1394_isoch_pool_t *pool = &irp->ir_data_pool;
	int		nframes;

	nframes = av1394_ic_alloc_pool(pool, icp->ic_framesz, icp->ic_nframes,
	    AV1394_IR_NFRAMES_MIN);
	if (nframes == 0) {
		*error = IEC61883_ERR_NOMEM;
		return (EINVAL);
	}
	mutex_enter(&icp->ic_mutex);
	/* the pool may hold fewer frames than requested */
	icp->ic_nframes = nframes;
	irp->ir_hiwat = nframes - av1394_ir_hiwat_sub;
	irp->ir_lowat = nframes - av1394_ir_lowat_sub;

	if (av1394_ic_dma_setup(icp, pool) != DDI_SUCCESS) {
		mutex_exit(&icp->ic_mutex);
		*error = IEC61883_ERR_NOMEM;
		av1394_ir_cleanup(icp, 1);
		return (EINVAL);
	}

	if (av1394_ir_build_ixl(icp) != DDI_SUCCESS) {
		mutex_exit(&icp->ic_mutex);
		*error = IEC61883_ERR_NOMEM;
		av1394_ir_cleanup(icp, 2);
		return (EINVAL);
	}
	mutex_exit(&icp->ic_mutex);

	if (av1394_ir_alloc_isoch_dma(icp) != DDI_SUCCESS) {
		*error = IEC61883_ERR_NOMEM;
		av1394_ir_cleanup(icp, 3);
		return (EINVAL);
	}

	return (0);
}

/*
 * av1394_ir_fini()
 *    Undo everything av1394_ir_init() did, at the maximum cleanup level.
 */
void
av1394_ir_fini(av1394_ic_t *icp)
{
	av1394_ir_cleanup(icp, AV1394_CLEANUP_LEVEL_MAX);
}

/*
 * av1394_ir_start()
 *    Reset frame accounting and start isochronous DMA.  A no-op if the
 *    channel is not idle.  Returns 0 on success, EIO if
 *    t1394_start_isoch_dma() fails.
 */
int
av1394_ir_start(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;
	av1394_ir_t	*irp = &icp->ic_ir;
	id1394_isoch_dma_ctrlinfo_t idma_ctrlinfo = { 0 };
	int		result;
	int		err;
	int		ret = 0;

	mutex_enter(&icp->ic_mutex);
	if (icp->ic_state != AV1394_IC_IDLE) {
		mutex_exit(&icp->ic_mutex);
		return (0);
	}

	/* all frames start out empty */
	irp->ir_first_full = 0;
	irp->ir_last_empty = icp->ic_nframes - 1;
	irp->ir_nfull = 0;
	irp->ir_nempty = icp->ic_nframes;
	irp->ir_read_cnt = 0;
	mutex_exit(&icp->ic_mutex);

	err = t1394_start_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl,
	    &idma_ctrlinfo, 0, &result);
	if (err == DDI_SUCCESS) {
		mutex_enter(&icp->ic_mutex);
		icp->ic_state = AV1394_IC_DMA;
		mutex_exit(&icp->ic_mutex);
	} else {
		ret = EIO;
	}

	return (ret);
}

/*
 * av1394_ir_stop()
 *    Stop isochronous DMA if it is running and mark the channel idle.
 *    The mutex is dropped around t1394_stop_isoch_dma() since the call
 *    may block.  Always returns 0.
 */
int
av1394_ir_stop(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;

	mutex_enter(&icp->ic_mutex);
	if (icp->ic_state != AV1394_IC_IDLE) {
		mutex_exit(&icp->ic_mutex);
		t1394_stop_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl, 0);
		mutex_enter(&icp->ic_mutex);
		icp->ic_state = AV1394_IC_IDLE;
	}
	mutex_exit(&icp->ic_mutex);

	return (0);
}

/*
 * av1394_ir_recv()
 *    ioctl-level receive: return empty frames described by the caller to
 *    the pool, then wait for full frames and report their index and count
 *    back through recv->rx_xfer.  Returns 0, EINVAL on bad arguments, or
 *    an error from av1394_ir_add_frames()/av1394_ir_wait_frames().
 */
int
av1394_ir_recv(av1394_ic_t *icp, iec61883_recv_t *recv)
{
	int		ret = 0;
	int		idx, cnt;

	idx = recv->rx_xfer.xf_empty_idx;
	cnt = recv->rx_xfer.xf_empty_cnt;

	/* check arguments */
	if ((idx < 0) || (idx >= icp->ic_nframes) ||
	    (cnt < 0) || (cnt > icp->ic_nframes)) {
		return (EINVAL);
	}

	mutex_enter(&icp->ic_mutex);
	if (cnt > 0) {
		/* add empty frames to the pool */
		if ((ret = av1394_ir_add_frames(icp, idx, cnt)) != 0) {
			mutex_exit(&icp->ic_mutex);
			return (ret);
		}
	}

	/* wait for new frames to arrive */
	ret = av1394_ir_wait_frames(icp,
	    &recv->rx_xfer.xf_full_idx, &recv->rx_xfer.xf_full_cnt);
	mutex_exit(&icp->ic_mutex);

	return (ret);
}

/*
 * av1394_ir_read()
 *    read(2) entry point: copy received data out to the user buffer until
 *    uio_resid is exhausted, waiting for full frames as needed and
 *    recycling fully-consumed frames back into the pool.
 */
int
av1394_ir_read(av1394_ic_t *icp, struct uio *uiop)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	int		ret = 0;
	int		empty_cnt;

	mutex_enter(&icp->ic_mutex);
	while (uiop->uio_resid) {
		/* wait for full frames, if necessary */
		if (irp->ir_read_cnt == 0) {
			irp->ir_read_off = 0;
			ret = av1394_ir_wait_frames(icp,
			    &irp->ir_read_idx, &irp->ir_read_cnt);
			if (ret != 0) {
				mutex_exit(&icp->ic_mutex);
				return (ret);
			}
		}

		/* copyout the data */
		ret = av1394_ir_copyout(icp, uiop, &empty_cnt);

		/* return freed frames to the pool */
		if (empty_cnt > 0) {
			av1394_ir_zero_pkts(icp, irp->ir_read_idx, empty_cnt);
			ret = av1394_ir_add_frames(icp, irp->ir_read_idx,
			    empty_cnt);
			irp->ir_read_idx += empty_cnt;
			irp->ir_read_idx %= icp->ic_nframes;
			irp->ir_read_cnt -= empty_cnt;
		}
	}
	mutex_exit(&icp->ic_mutex);

	return (ret);
}

/*
 *
 * --- configuration routines
 *
 */
/*
 * av1394_ir_cleanup()
 *    Tear down receive resources.  'level' indicates how far init got:
 *    each case falls through to undo everything below it (default ==
 *    full teardown including the isoch DMA resource).
 */
static void
av1394_ir_cleanup(av1394_ic_t *icp, int level)
{
	av1394_isoch_pool_t *pool = &icp->ic_ir.ir_data_pool;

	ASSERT((level > 0) && (level <= AV1394_CLEANUP_LEVEL_MAX));

	switch (level) {
	default:
		av1394_ir_free_isoch_dma(icp);
		/* FALLTHRU */
	case 3:
		av1394_ir_destroy_ixl(icp);
		/* FALLTHRU */
	case 2:
		av1394_ic_dma_cleanup(icp, pool);
		/* FALLTHRU */
	case 1:
		av1394_ic_free_pool(pool);
		/* FALLTHRU */
	}
}

/*
 * av1394_ir_build_ixl()
 *    Build an IXL chain to receive CIP data.  The smallest instance of data
 *    that can be received is a packet, typically 512 bytes.  Frames consist
 *    of a number of packets, typically 250-300.  Packet size, frame size and
 *    number of frames allocated are set by a user process.  The received data
 *    is made available to the user process in full frames, hence there is an
 *    IXL callback at the end of each frame.  A sequence of IXL commands that
 *    receives one frame is further referred to as an IXL data block.
 *
 *    During normal operation, frames are in a circular list and IXL chain
 *    does not change.  When the user process does not keep up with the
 *    data flow and there are too few empty frames left, the jump following
 *    last empty frame is dynamically updated to point to NULL -- otherwise
 *    the first full frame would be overwritten.  When IXL execution reaches
 *    the nulled jump, it just waits until the driver updates it again or
 *    stops the transfer.  Once a user process frees up enough frames, the
 *    jump is restored and transfer continues.  User process will be able to
 *    detect dropped packets using continuity counters embedded in the data.
 *
 *    Because RECV_BUF buffer size is limited to AV1394_IXL_BUFSZ_MAX, and due
 *    to isoch pool segmentation, the number of RECV_BUF commands per IXL data
 *    block depends on frame size.  Also, to simplify calculations, we consider
 *    a sequence of RECV_BUF commands to consist of two parts: zero or more
 *    equal-sized RECV_BUF commands followed by one "tail" RECV_BUF command,
 *    whose size may not be equal to others.
 *
 *    Schematically the IXL chain looks like this:
 *
 *    ...
 *    LABEL N;
 *    RECV_BUF(buf)
 *    ...
 *    RECV_BUF(tail)
 *    CALLBACK(frame done);
 *    JUMP_U(LABEL (N+1)%nframes or NULL);
 *    ...
 */
static int
av1394_ir_build_ixl(av1394_ic_t *icp)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	av1394_isoch_pool_t *pool = &irp->ir_data_pool;
	int		i;	/* segment index */
	int		j;
	int		fi;	/* frame index */
	int		bi;	/* buffer index */

	/* allocate space for IXL data blocks */
	irp->ir_ixl_data = kmem_zalloc(icp->ic_nframes *
	    sizeof (av1394_ir_ixl_data_t), KM_SLEEP);

	/*
	 * We have a bunch of segments, and each is divided into cookies.  We
	 * need to cover the segments with RECV_BUFs such that they
	 * - don't span cookies
	 * - don't span frames
	 * - are at most AV1394_IXL_BUFSZ_MAX
	 *
	 * The straightforward algorithm is to start from the beginning, find
	 * the next lowest frame or cookie boundary, and either make a buf for
	 * it if it is smaller than AV1394_IXL_BUFSZ_MAX, or make multiple
	 * bufs for it as with av1394_ic_ixl_seg_decomp().  And repeat.
	 */

	/* first pass: count how many RECV_BUF commands are needed */
	irp->ir_ixl_nbufs = 0;
	for (i = 0; i < pool->ip_nsegs; ++i) {
		av1394_isoch_seg_t *isp = &pool->ip_seg[i];
		size_t dummy1, dummy2;

		uint_t off = 0;
		uint_t end;

		uint_t frame_end = icp->ic_framesz;
		int ci = 0;
		uint_t cookie_end = isp->is_dma_cookie[ci].dmac_size;

		for (;;) {
			/* nearest frame or cookie boundary */
			end = min(frame_end, cookie_end);

			if (end - off <= AV1394_IXL_BUFSZ_MAX) {
				++irp->ir_ixl_nbufs;
			} else {
				irp->ir_ixl_nbufs += av1394_ic_ixl_seg_decomp(
				    end - off, icp->ic_pktsz, &dummy1, &dummy2);
				/* count the tail buffer */
				++irp->ir_ixl_nbufs;
			}

			off = end;
			if (off >= isp->is_size)
				break;

			if (off == frame_end)
				frame_end += icp->ic_framesz;
			if (off == cookie_end) {
				++ci;
				cookie_end += isp->is_dma_cookie[ci].dmac_size;
			}
		}
	}

	irp->ir_ixl_buf = kmem_zalloc(irp->ir_ixl_nbufs *
	    sizeof (ixl1394_xfer_buf_t), KM_SLEEP);

	/* second pass: initialize the commands and link them together */
	fi = 0;
	bi = 0;

	for (i = 0; i < pool->ip_nsegs; ++i) {
		av1394_isoch_seg_t *isp = &pool->ip_seg[i];

		uint_t off = 0;		/* offset into segment */
		uint_t end;
		uint_t coff = 0;	/* offset into cookie */

		uint_t frame_end = icp->ic_framesz;
		int ci = 0;
		uint_t cookie_end = isp->is_dma_cookie[ci].dmac_size;

		ixl1394_command_t *nextp;

		av1394_ir_ixl_label_init(&irp->ir_ixl_data[fi],
		    (ixl1394_command_t *)&irp->ir_ixl_buf[bi]);

		for (;;) {
			end = min(frame_end, cookie_end);

			/*
			 * at a frame boundary the last RECV_BUF links to the
			 * frame-done callback, otherwise to the next RECV_BUF
			 */
			if (end == frame_end)
				nextp = (ixl1394_command_t *)
				    &irp->ir_ixl_data[fi].rd_cb;
			else
				nextp = (ixl1394_command_t *)
				    &irp->ir_ixl_buf[bi + 1];

			if (end - off <= AV1394_IXL_BUFSZ_MAX) {
				av1394_ir_ixl_buf_init(icp,
				    &irp->ir_ixl_buf[bi], isp, off,
				    isp->is_dma_cookie[ci].dmac_laddress + coff,
				    end - off, nextp);
				coff += end - off;
				off = end;
				++bi;
			} else {
				size_t reg, tail;
				uint_t nbufs;

				/* nbufs equal-sized bufs plus one tail buf */
				nbufs = av1394_ic_ixl_seg_decomp(end - off,
				    icp->ic_pktsz, &reg, &tail);

				for (j = 0; j < nbufs; ++j) {
					av1394_ir_ixl_buf_init(icp,
					    &irp->ir_ixl_buf[bi], isp, off,
					    isp->is_dma_cookie[ci].
					    dmac_laddress + coff, reg,
					    (ixl1394_command_t *)
					    &irp->ir_ixl_buf[bi + 1]);
					++bi;
					off += reg;
					coff += reg;
				}

				av1394_ir_ixl_buf_init(icp,
				    &irp->ir_ixl_buf[bi], isp, off,
				    isp->is_dma_cookie[ci].dmac_laddress + coff,
				    tail, nextp);
				++bi;
				off += tail;
				coff += tail;
			}

			ASSERT((off == frame_end) || (off == cookie_end));

			if (off >= isp->is_size)
				break;

			if (off == frame_end) {
				/* finish this frame's IXL data block */
				av1394_ir_ixl_cb_init(icp,
				    &irp->ir_ixl_data[fi], fi);
				av1394_ir_ixl_jump_init(icp,
				    &irp->ir_ixl_data[fi], fi);
				++fi;
				frame_end += icp->ic_framesz;
				av1394_ir_ixl_label_init(&irp->ir_ixl_data[fi],
				    (ixl1394_command_t *)&irp->ir_ixl_buf[bi]);
			}

			if (off == cookie_end) {
				++ci;
				cookie_end += isp->is_dma_cookie[ci].dmac_size;
				coff = 0;
			}
		}

		/* segment ends exactly on a frame boundary */
		av1394_ir_ixl_cb_init(icp, &irp->ir_ixl_data[fi], fi);
		av1394_ir_ixl_jump_init(icp, &irp->ir_ixl_data[fi], fi);
		++fi;
	}

	ASSERT(fi == icp->ic_nframes);
	ASSERT(bi == irp->ir_ixl_nbufs);

	irp->ir_ixlp = (ixl1394_command_t *)irp->ir_ixl_data;

	if (av1394_ir_dump_ixl) {
		av1394_ic_ixl_dump(irp->ir_ixlp);
	}

	return (DDI_SUCCESS);
}

/*
 * av1394_ir_ixl_label_init()
 *    Initialize a frame's LABEL command and point it at the frame's first
 *    RECV_BUF.
 */
static void
av1394_ir_ixl_label_init(av1394_ir_ixl_data_t *dp, ixl1394_command_t *nextp)
{
	dp->rd_label.ixl_opcode = IXL1394_OP_LABEL;
	dp->rd_label.next_ixlp = nextp;
}

/*
 * av1394_ir_ixl_buf_init()
 *    Initialize a RECV_BUF command: 'addr' is the DMA (cookie) address,
 *    'offset' the matching kernel-virtual offset into the segment, 'size'
 *    the buffer length and 'nextp' the following IXL command.
 */
static void
av1394_ir_ixl_buf_init(av1394_ic_t *icp, ixl1394_xfer_buf_t *buf,
	av1394_isoch_seg_t *isp, off_t offset, uint64_t addr, uint16_t size,
	ixl1394_command_t *nextp)
{
	buf->ixl_opcode = IXL1394_OP_RECV_BUF;
	buf->size = size;
	buf->pkt_size = icp->ic_pktsz;
	buf->ixl_buf._dmac_ll = addr;
	buf->mem_bufp = isp->is_kaddr + offset;
	buf->next_ixlp = nextp;
}

/*
 * av1394_ir_ixl_cb_init()
 *    Initialize a frame's CALLBACK command; the frame index 'i' is passed
 *    to av1394_ir_ixl_frame_cb() as the callback argument.
 */
/*ARGSUSED*/
static void
av1394_ir_ixl_cb_init(av1394_ic_t *icp, av1394_ir_ixl_data_t *dp, int i)
{
	dp->rd_cb.ixl_opcode = IXL1394_OP_CALLBACK;
	dp->rd_cb.callback = av1394_ir_ixl_frame_cb;
	dp->rd_cb.callback_arg = (void *)(intptr_t)i;
	dp->rd_cb.next_ixlp = (ixl1394_command_t *)&dp->rd_jump;
}

/*
 * av1394_ir_ixl_jump_init()
 *    Initialize frame i's JUMP_U command to jump to frame (i+1)%nframes,
 *    making the chain circular; the last frame's next_ixlp is NULL to
 *    terminate the command list itself.
 */
static void
av1394_ir_ixl_jump_init(av1394_ic_t *icp, av1394_ir_ixl_data_t *dp, int i)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	int		next_idx;
	ixl1394_command_t *jump_cmd;

	next_idx = (i + 1) % icp->ic_nframes;
	jump_cmd = (ixl1394_command_t *)&irp->ir_ixl_data[next_idx];

	dp->rd_jump.ixl_opcode = IXL1394_OP_JUMP_U;
	dp->rd_jump.label = jump_cmd;
	dp->rd_jump.next_ixlp = (next_idx != 0) ? jump_cmd : NULL;
}

/*
 * av1394_ir_destroy_ixl()
 *    Free the IXL command arrays built by av1394_ir_build_ixl().
 */
static void
av1394_ir_destroy_ixl(av1394_ic_t *icp)
{
	av1394_ir_t	*irp = &icp->ic_ir;

	mutex_enter(&icp->ic_mutex);
	kmem_free(irp->ir_ixl_buf,
	    irp->ir_ixl_nbufs * sizeof (ixl1394_xfer_buf_t));
	kmem_free(irp->ir_ixl_data,
	    icp->ic_nframes * sizeof (av1394_ir_ixl_data_t));

	irp->ir_ixlp = NULL;
	irp->ir_ixl_buf = NULL;
	irp->ir_ixl_data = NULL;
	mutex_exit(&icp->ic_mutex);
}

/*
 * av1394_ir_alloc_isoch_dma()
 *    Allocate the isoch DMA resource for the channel's IXL chain and
 *    register the DMA-stopped callback.
 */
static int
av1394_ir_alloc_isoch_dma(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;
	av1394_ir_t	*irp = &icp->ic_ir;
	id1394_isoch_dmainfo_t di;
	int		result;
	int		ret;

	di.ixlp = irp->ir_ixlp;
	di.channel_num = icp->ic_num;
	di.global_callback_arg = icp;
	di.idma_options = ID1394_LISTEN_PKT_MODE;
	di.isoch_dma_stopped = av1394_ir_dma_stopped_cb;
	di.idma_evt_arg = icp;

	ret = t1394_alloc_isoch_dma(avp->av_t1394_hdl, &di, 0,
	    &icp->ic_isoch_hdl, &result);

	return (ret);
}

/*
 * av1394_ir_free_isoch_dma()
 *    Free the channel's isoch DMA resource.
 */
static void
av1394_ir_free_isoch_dma(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;

	t1394_free_isoch_dma(avp->av_t1394_hdl, 0, &icp->ic_isoch_hdl);
}

/*
 * av1394_ir_dma_sync_frames()
 *    DMA-sync 'cnt' frames starting at 'idx' for CPU access before their
 *    data is read.
 */
static void
av1394_ir_dma_sync_frames(av1394_ic_t *icp, int idx, int cnt)
{
	av1394_ic_dma_sync_frames(icp, idx, cnt,
	    &icp->ic_ir.ir_data_pool, DDI_DMA_SYNC_FORCPU);
}

/*
 *
 * --- callbacks
 *
 */
/*
 * av1394_ir_ixl_frame_cb()
 *    IXL frame-done callback: account for one more full frame, wake up
 *    waiters and trigger the overflow soft interrupt when the high
 *    watermark is reached.
 */
/*ARGSUSED*/
static void
av1394_ir_ixl_frame_cb(opaque_t arg, struct ixl1394_callback *cb)
{
	av1394_ic_t	*icp = arg;
	av1394_isoch_t	*ip = &icp->ic_avp->av_i;
	av1394_ir_t	*irp = &icp->ic_ir;

	mutex_enter(&ip->i_mutex);
	mutex_enter(&icp->ic_mutex);
	if (irp->ir_nfull < icp->ic_nframes) {
		irp->ir_nfull++;
		irp->ir_nempty--;
		cv_broadcast(&icp->ic_xfer_cv);

		/*
		 * signal the overflow condition early, so we get enough
		 * time to handle it before old data is overwritten
		 */
		if (irp->ir_nfull >= irp->ir_hiwat) {
			av1394_ic_trigger_softintr(icp, icp->ic_num,
			    AV1394_PREQ_IR_OVERFLOW);
		}
	}
	mutex_exit(&icp->ic_mutex);
	mutex_exit(&ip->i_mutex);
}

/*
 * received data overflow
 */
void
av1394_ir_overflow(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;
	av1394_ir_t	*irp = &icp->ic_ir;
	int		idx;
	ixl1394_jump_t	*old_jmp;
	ixl1394_jump_t	new_jmp;
	id1394_isoch_dma_updateinfo_t update_info;
	int		err;
	int		result;

	/*
	 * in the circular IXL chain overflow means overwriting the least
	 * recent data.  to avoid that, we suspend the transfer by NULL'ing
	 * the last IXL block until the user process frees up some frames.
	 */
	idx = irp->ir_last_empty;

	old_jmp = &irp->ir_ixl_data[idx].rd_jump;

	new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
	new_jmp.label = NULL;
	new_jmp.next_ixlp = NULL;

	update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
	update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
	update_info.ixl_count = 1;

	/* drop the mutex around the potentially blocking update call */
	mutex_exit(&icp->ic_mutex);
	err = t1394_update_isoch_dma(avp->av_t1394_hdl, icp->ic_isoch_hdl,
	    &update_info, 0, &result);
	mutex_enter(&icp->ic_mutex);

	if (err == DDI_SUCCESS) {
		/* remember which jump to restore on resume */
		irp->ir_overflow_idx = idx;
		icp->ic_state = AV1394_IC_SUSPENDED;
	}
}

/*
 * restore from overflow condition
 */
static void
av1394_ir_overflow_resume(av1394_ic_t *icp)
{
	av1394_inst_t	*avp = icp->ic_avp;
	av1394_ir_t	*irp = &icp->ic_ir;
	int		idx, next_idx;
	ixl1394_jump_t	*old_jmp;
	ixl1394_jump_t	new_jmp;
	id1394_isoch_dma_updateinfo_t update_info;
	int		err;
	int		result;

	/*
	 * restore the jump command we NULL'ed in av1394_ir_overflow()
	 */
	idx = irp->ir_overflow_idx;
	next_idx = (idx + 1) % icp->ic_nframes;

	old_jmp = &irp->ir_ixl_data[idx].rd_jump;

	new_jmp.ixl_opcode = IXL1394_OP_JUMP_U;
	new_jmp.label = (ixl1394_command_t *)&irp->ir_ixl_data[next_idx];
	new_jmp.next_ixlp = NULL;

	update_info.orig_ixlp = (ixl1394_command_t *)old_jmp;
	update_info.temp_ixlp = (ixl1394_command_t *)&new_jmp;
	update_info.ixl_count = 1;

	/* drop the mutex around the potentially blocking update call */
	mutex_exit(&icp->ic_mutex);
	err = t1394_update_isoch_dma(avp->av_t1394_hdl,
	    icp->ic_isoch_hdl, &update_info, 0, &result);
	mutex_enter(&icp->ic_mutex);

	if (err == DDI_SUCCESS) {
		icp->ic_state = AV1394_IC_DMA;
	}
}

/*
 * av1394_ir_dma_stopped_cb()
 *    Isoch DMA "stopped" event callback: mark the channel idle.
 */
/*ARGSUSED*/
static void
av1394_ir_dma_stopped_cb(t1394_isoch_dma_handle_t t1394_idma_hdl,
	opaque_t idma_evt_arg, id1394_isoch_dma_stopped_t status)
{
	av1394_ic_t	*icp = idma_evt_arg;

	mutex_enter(&icp->ic_mutex);
	icp->ic_state = AV1394_IC_IDLE;
	mutex_exit(&icp->ic_mutex);
}


/*
 *
 * --- data transfer routines
 *
 * av1394_ir_add_frames()
 *    Add empty frames to the pool.  'idx' must immediately follow the
 *    current last empty frame.  Returns 0 or EINVAL.
 */
static int
av1394_ir_add_frames(av1394_ic_t *icp, int idx, int cnt)
{
	av1394_ir_t	*irp = &icp->ic_ir;

	/* can only add to the tail */
	if (idx != ((irp->ir_last_empty + 1) % icp->ic_nframes)) {
		return (EINVAL);
	}

	/* turn full frames into empty ones */
	irp->ir_nfull -= cnt;
	irp->ir_first_full = (irp->ir_first_full + cnt) % icp->ic_nframes;
	irp->ir_nempty += cnt;
	irp->ir_last_empty = (irp->ir_last_empty + cnt) % icp->ic_nframes;
	ASSERT((irp->ir_nfull >= 0) && (irp->ir_nempty <= icp->ic_nframes));

	/* if suspended due to overflow, check if we can resume */
	if ((icp->ic_state == AV1394_IC_SUSPENDED) &&
	    (irp->ir_nempty >= irp->ir_lowat)) {
		av1394_ir_overflow_resume(icp);
	}

	return (0);
}

/*
 * av1394_ir_wait_frames()
 *    Wait until at least one full frame is available, then return the
 *    index of the first full frame and the full-frame count (after
 *    DMA-syncing them for the CPU).  Returns EINTR if interrupted by a
 *    signal before any frame arrived.  Called with ic_mutex held.
 */
static int
av1394_ir_wait_frames(av1394_ic_t *icp, int *idx, int *cnt)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	int		ret = 0;

	while (irp->ir_nfull == 0) {
		if (cv_wait_sig(&icp->ic_xfer_cv, &icp->ic_mutex) <= 0) {
			ret = EINTR;
			break;
		}
	}
	if (irp->ir_nfull > 0) {
		*idx = irp->ir_first_full;
		*cnt = irp->ir_nfull;
		av1394_ir_dma_sync_frames(icp, *idx, *cnt);
		ret = 0;
	}
	return (ret);
}

/*
 * copyout the data, adjust to data format and remove empty CIPs if possible
 */
static int
av1394_ir_copyout(av1394_ic_t *icp, struct uio *uiop, int *empty_cnt)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	av1394_isoch_seg_t *seg = irp->ir_data_pool.ip_seg;
	int		idx = irp->ir_read_idx;
	int		cnt = irp->ir_read_cnt;
	int		pktsz = icp->ic_pktsz;
	int		bs;		/* data block size */
	caddr_t		kaddr_begin, kaddr;
	int		pkt_off;	/* offset into current packet */
	int		len;
	int		frame_resid;	/* bytes left in the current frame */
	int		ret = 0;

	*empty_cnt = 0;

	/* DBS -> block size */
	bs = *(uchar_t *)(seg[idx].is_kaddr + 1) * 4 + AV1394_CIPSZ;
	if ((bs > pktsz) || (bs < AV1394_CIPSZ + 8)) {
		/* implausible DBS value -- fall back to the packet size */
		bs = pktsz;
	}

	while ((cnt > 0) && (uiop->uio_resid > 0) && (ret == 0)) {
		kaddr = kaddr_begin = seg[idx].is_kaddr + irp->ir_read_off;
		frame_resid = icp->ic_framesz - irp->ir_read_off;

		/* drop the mutex while copying out to user space */
		mutex_exit(&icp->ic_mutex);
		/* copyout data blocks, skipping empty CIPs */
		while ((uiop->uio_resid > 0) && (frame_resid > 0)) {
			pkt_off = (uintptr_t)kaddr % pktsz;
			/*
			 * a quadlet following CIP header can't be zero
			 * unless in an empty packet
			 */
			if ((pkt_off == 0) &&
			    (*(uint32_t *)(kaddr + AV1394_CIPSZ) == 0)) {
				kaddr += pktsz;
				frame_resid -= pktsz;
				continue;
			}

			len = bs - pkt_off;
			if (len > uiop->uio_resid) {
				len = uiop->uio_resid;
			}
			if (len > frame_resid) {
				len = frame_resid;
			}
			if ((ret = uiomove(kaddr, len, UIO_READ, uiop)) != 0) {
				break;
			}

			if (pkt_off + len == bs) {
				/* block done -- skip the packet padding */
				kaddr += pktsz - pkt_off;
				frame_resid -= pktsz - pkt_off;
			} else {
				kaddr += len;
				frame_resid -= len;
			}
		}
		mutex_enter(&icp->ic_mutex);

		if (frame_resid > 0) {
			/* partially consumed frame -- remember the offset */
			irp->ir_read_off = kaddr - kaddr_begin;
		} else {
			irp->ir_read_off = 0;
			idx = (idx + 1) % icp->ic_nframes;
			cnt--;
			(*empty_cnt)++;
		}
	}

	return (ret);
}

/*
 * zero a quadlet in each packet so we can recognize empty CIPs
 */
static void
av1394_ir_zero_pkts(av1394_ic_t *icp, int idx, int cnt)
{
	av1394_ir_t	*irp = &icp->ic_ir;
	av1394_isoch_seg_t *seg = irp->ir_data_pool.ip_seg;
	caddr_t		kaddr, kaddr_end;
	int		pktsz = icp->ic_pktsz;
	int		i;

	for (i = cnt; i > 0; i--) {
		/* zero the quadlet that follows each CIP header */
		kaddr = seg[idx].is_kaddr + AV1394_CIPSZ;
		kaddr_end = seg[idx].is_kaddr + icp->ic_framesz;
		do {
			*(uint32_t *)kaddr = 0;
			kaddr += pktsz;
		} while (kaddr < kaddr_end);

		idx = (idx + 1) % icp->ic_nframes;
	}
}