/*
 * Implementation of SCSI Processor Target Peripheral driver for CAM.
 *
 * Copyright (c) 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: scsi_pt.c,v 1.2 1998/10/15 17:46:26 ken Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/conf.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pt.h>

typedef enum {
	PT_STATE_PROBE,
	PT_STATE_NORMAL
} pt_state;

typedef enum {
	PT_FLAG_NONE		= 0x00,
	PT_FLAG_OPEN		= 0x01,
	PT_FLAG_DEVICE_INVALID	= 0x02,
	PT_FLAG_RETRY_UA	= 0x04
} pt_flags;

typedef enum {
	PT_CCB_BUFFER_IO	= 0x01,
	PT_CCB_WAITING		= 0x02,
	PT_CCB_RETRY_UA		= 0x04,
	PT_CCB_BUFFER_IO_UA	= PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA
} pt_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct pt_softc {
	struct buf_queue_head buf_queue;
	struct devstat device_stats;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	pt_state state;
	pt_flags flags;
	union ccb saved_ccb;
};

static	d_open_t	ptopen;
static	d_read_t	ptread;
static	d_write_t	ptwrite;
static	d_close_t	ptclose;
static	d_strategy_t	ptstrategy;
static	periph_init_t	ptinit;
static	void		ptasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	periph_ctor_t	ptctor;
static	periph_oninv_t	ptoninvalidate;
static	periph_dtor_t	ptdtor;
static	periph_start_t	ptstart;
static	void		ptdone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		pterror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);

void	scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries,
			  void (*cbfcnp)(struct cam_periph *, union ccb *),
			  u_int tag_action, int readop, u_int byte2,
			  u_int32_t xfer_len, u_int8_t *data_ptr,
			  u_int8_t sense_len, u_int32_t timeout);
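
/*
 * Driver registration: the periph_driver structure below and its DATA_SET()
 * entry add "pt" to the periphdriver linker set, which causes ptinit() to be
 * run when CAM configures its peripheral drivers.
 */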

static struct periph_driver ptdriver =
{
	ptinit, "pt",
	TAILQ_HEAD_INITIALIZER(ptdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, ptdriver);

#define PT_CDEV_MAJOR 61

static struct cdevsw pt_cdevsw =
{
	/*d_open*/	ptopen,
	/*d_close*/	ptclose,
	/*d_read*/	ptread,
	/*d_write*/	ptwrite,
	/*d_ioctl*/	noioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	seltrue,
	/*d_mmap*/	nommap,
	/*d_strategy*/	ptstrategy,
	/*d_name*/	"pt",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	nodump,
	/*d_psize*/	nopsize,
	/*d_flags*/	0,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};

static struct extend_array *ptperiphs;

static int
ptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	int unit;
	int error;
	int s;

	unit = minor(dev);
	periph = cam_extend_get(ptperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct pt_softc *)periph->softc;

	s = splsoftcam();
	if (softc->flags & PT_FLAG_DEVICE_INVALID) {
		splx(s);
		return(ENXIO);
	}
	splx(s);

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
		  ("ptopen: dev=0x%x (unit %d)\n", dev, unit));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0)
		return (error); /* error code from tsleep */

	if ((softc->flags & PT_FLAG_OPEN) == 0) {
		if (cam_periph_acquire(periph) != CAM_REQ_CMP)
			error = ENXIO;
		else
			softc->flags |= PT_FLAG_OPEN;
	} else
		error = EBUSY;

	cam_periph_unlock(periph);
	return (error);
}

static int
ptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	int unit;
	int error;

	unit = minor(dev);
	periph = cam_extend_get(ptperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct pt_softc *)periph->softc;

	if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
		return (error); /* error code from tsleep */

	softc->flags &= ~PT_FLAG_OPEN;
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

static int
ptread(dev_t dev, struct uio *uio, int ioflag)
{
	return(physio(ptstrategy, NULL, dev, 1, minphys, uio));
}

static int
ptwrite(dev_t dev, struct uio *uio, int ioflag)
{
	return(physio(ptstrategy, NULL, dev, 0, minphys, uio));
}
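
/*
 * ptread() and ptwrite() above hand the user's request to physio(), which
 * wraps it in a struct buf and passes it to ptstrategy() one physical
 * transfer at a time.  Given how ptstart() builds CDBs below, a read(2) on
 * the character device becomes a SCSI RECEIVE and a write(2) becomes a SCSI
 * SEND.  A minimal, hypothetical userland sketch (the /dev/pt0 node name is
 * an assumption; this file does not create device nodes):
 *
 *	int fd = open("/dev/pt0", O_RDWR);
 *	write(fd, out_buf, out_len);	(SEND data to the processor device)
 *	read(fd, in_buf, in_len);	(RECEIVE data from the device)
 *	close(fd);
 */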

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ptstrategy(struct buf *bp)
{
	struct cam_periph *periph;
	struct pt_softc *softc;
	u_int unit;
	int s;

	unit = minor(bp->b_dev);
	periph = cam_extend_get(ptperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	softc = (struct pt_softc *)periph->softc;

	/*
	 * Mask interrupts so that the device cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
		splx(s);
		bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Place it in the queue of activities for this device
	 */
	bufq_insert_tail(&softc->buf_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
bad:
	bp->b_flags |= B_ERROR;

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
}

static void
ptinit(void)
{
	cam_status status;
	struct cam_path *path;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	ptperiphs = cam_extend_new();
	if (ptperiphs == NULL) {
		printf("pt: Failed to alloc extend array!\n");
		return;
	}

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE;
		csa.callback = ptasync;
		csa.callback_arg = NULL;
		xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
		xpt_free_path(path);
	}

	if (status != CAM_REQ_CMP) {
		printf("pt: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {
		/* If we were successful, register our devsw */
		dev_t dev;

		dev = makedev(PT_CDEV_MAJOR, 0);
		cdevsw_add(&dev, &pt_cdevsw, NULL);
	}
}

static cam_status
ptctor(struct cam_periph *periph, void *arg)
{
	struct pt_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("ptregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("ptregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct pt_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);

	if (softc == NULL) {
		printf("ptregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = PT_STATE_NORMAL;
	bufq_init(&softc->buf_queue);

	periph->softc = softc;

	cam_extend_set(ptperiphs, periph->unit_number, periph);

	/*
	 * There is no meaningful blocksize for a processor device,
	 * so register our devstat entry with DEVSTAT_NO_BLOCKSIZE.
	 */
	devstat_add_entry(&softc->device_stats, "pt",
			  periph->unit_number, 0,
			  DEVSTAT_NO_BLOCKSIZE,
			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = ptasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	/* Tell the user we've attached to the device */
	xpt_announce_periph(periph, NULL);

	return(CAM_REQ_CMP);
}

static void
ptoninvalidate(struct cam_periph *periph)
{
	int s;
	struct pt_softc *softc;
	struct buf *q_bp;
	struct ccb_setasync csa;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = ptasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	softc->flags |= PT_FLAG_DEVICE_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL) {
		bufq_remove(&softc->buf_queue, q_bp);
		q_bp->b_resid = q_bp->b_bcount;
		q_bp->b_error = ENXIO;
		q_bp->b_flags |= B_ERROR;
		biodone(q_bp);
	}

	splx(s);

	xpt_print_path(periph->path);
	printf("lost device\n");
}

static void
ptdtor(struct cam_periph *periph)
{
	struct pt_softc *softc;

	softc = (struct pt_softc *)periph->softc;

	devstat_remove_entry(&softc->device_stats);

	cam_extend_release(ptperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	free(softc, M_DEVBUF);
}

static void
ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;

		if (cgd->pd_type != T_PROCESSOR)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor,
					  ptstart, "pt", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, ptasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("ptasync: Unable to attach to new device "
			       "due to status 0x%x\n", status);
		break;
	}
	case AC_LOST_DEVICE:
	{
		cam_periph_invalidate(periph);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct pt_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct pt_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= PT_FLAG_RETRY_UA;
		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
			ccbh->ccb_state |= PT_CCB_RETRY_UA;
		splx(s);
		break;
	}
	case AC_TRANSFER_NEG:
	case AC_SCSI_AEN:
	case AC_UNSOL_RESEL:
	default:
		break;
	}
}
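
/*
 * ptstart() is the peripheral's start routine.  The XPT calls it once a
 * CCB is available for a request queued via xpt_schedule(); it either hands
 * the CCB to a waiting "immediate" consumer or converts the next queued buf
 * into a SEND/RECEIVE transaction and dispatches it with xpt_action().
 */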

static void
ptstart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct pt_softc *softc;
	struct buf *bp;
	int s;

	softc = (struct pt_softc *)periph->softc;

	/*
	 * See if there is a buf with work for us to do.
	 */
	s = splbio();
	bp = bufq_first(&softc->buf_queue);
	if (periph->immediate_priority <= periph->pinfo.priority) {
		CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
				("queuing for immediate ccb\n"));
		start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
		SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
				  periph_links.sle);
		periph->immediate_priority = CAM_PRIORITY_NONE;
		splx(s);
		wakeup(&periph->ccb_list);
	} else if (bp == NULL) {
		splx(s);
		xpt_release_ccb(start_ccb);
	} else {
		int oldspl;

		bufq_remove(&softc->buf_queue, bp);

		devstat_start_transaction(&softc->device_stats);

		scsi_send_receive(&start_ccb->csio,
				  /*retries*/4,
				  ptdone,
				  MSG_SIMPLE_Q_TAG,
				  bp->b_flags & B_READ,
				  /*byte2*/0,
				  bp->b_bcount,
				  bp->b_data,
				  /*sense_len*/SSD_FULL_SIZE,
				  /*timeout*/10000);

		start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO;

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
				 periph_links.le);
		splx(oldspl);

		start_ccb->ccb_h.ccb_bp = bp;
		bp = bufq_first(&softc->buf_queue);
		splx(s);

		xpt_action(start_ccb);

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
	}
}
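
/*
 * ptdone() is the completion callback for CCBs issued from ptstart().  For
 * buffer I/O it runs any needed error recovery through pterror(), removes
 * the CCB from the pending list, records the transfer with devstat, and
 * completes the buf with biodone().
 */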

static void
ptdone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct pt_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct pt_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state) {
	case PT_CCB_BUFFER_IO:
	case PT_CCB_BUFFER_IO_UA:
	{
		struct buf *bp;
		int oldspl;

		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			if ((error = pterror(done_ccb, 0, sf)) == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct buf *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our device
					 * as invalid.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating device\n");
					softc->flags |= PT_FLAG_DEVICE_INVALID;
				}

				/*
				 * Return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bufq_first(&softc->buf_queue))
					!= NULL) {
					bufq_remove(&softc->buf_queue, q_bp);
					q_bp->b_resid = q_bp->b_bcount;
					q_bp->b_error = EIO;
					q_bp->b_flags |= B_ERROR;
					biodone(q_bp);
				}
				splx(s);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_ERROR;
			} else {
				bp->b_resid = csio->resid;
				bp->b_error = 0;
				if (bp->b_resid != 0) {
					/* Short transfer ??? */
					bp->b_flags |= B_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			bp->b_resid = csio->resid;
			if (bp->b_resid != 0)
				bp->b_flags |= B_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		devstat_end_transaction(&softc->device_stats,
					bp->b_bcount - bp->b_resid,
					done_ccb->csio.tag_action & 0xf,
					(bp->b_flags & B_READ) ? DEVSTAT_READ
							       : DEVSTAT_WRITE);

		biodone(bp);
		break;
	}
	case PT_CCB_WAITING:
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	xpt_release_ccb(done_ccb);
}

static int
pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct pt_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct pt_softc *)periph->softc;

	return(cam_periph_error(ccb, cam_flags, sense_flags,
				&softc->saved_ccb));
}

void
scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries,
		  void (*cbfcnp)(struct cam_periph *, union ccb *),
		  u_int tag_action, int readop, u_int byte2,
		  u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len,
		  u_int32_t timeout)
{
	struct scsi_send_receive *scsi_cmd;

	scsi_cmd = (struct scsi_send_receive *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = readop ? RECEIVE : SEND;
	scsi_cmd->byte2 = byte2;
	scsi_ulto3b(xfer_len, scsi_cmd->xfer_len);
	scsi_cmd->control = 0;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
		      tag_action,
		      data_ptr,
		      xfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}
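
/*
 * For reference, scsi_send_receive() above builds a six byte CDB laid out
 * as follows (a sketch inferred from the struct scsi_send_receive fields
 * used above, not a normative definition):
 *
 *	byte 0:		opcode (SEND for writes, RECEIVE for reads)
 *	byte 1:		byte2 (passed through unchanged from the caller)
 *	bytes 2-4:	transfer length, 24-bit big-endian via scsi_ulto3b()
 *	byte 5:		control (always 0 here)
 */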