/*
 * Implementation of SCSI Processor Target Peripheral driver for CAM.
 *
 * Copyright (c) 1998 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id: scsi_pt.c,v 1.9 1999/05/30 16:51:05 phk Exp $
 */

#include <sys/param.h>
#include <sys/queue.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/malloc.h>
#include <sys/conf.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_pt.h>

typedef enum {
        PT_STATE_PROBE,
        PT_STATE_NORMAL
} pt_state;

typedef enum {
        PT_FLAG_NONE            = 0x00,
        PT_FLAG_OPEN            = 0x01,
        PT_FLAG_DEVICE_INVALID  = 0x02,
        PT_FLAG_RETRY_UA        = 0x04
} pt_flags;

typedef enum {
        PT_CCB_BUFFER_IO        = 0x01,
        PT_CCB_WAITING          = 0x02,
        PT_CCB_RETRY_UA         = 0x04,
        PT_CCB_BUFFER_IO_UA     = PT_CCB_BUFFER_IO|PT_CCB_RETRY_UA
} pt_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state       ppriv_field0
#define ccb_bp          ppriv_ptr1

struct pt_softc {
        struct buf_queue_head buf_queue;
        struct devstat device_stats;
        LIST_HEAD(, ccb_hdr) pending_ccbs;
        pt_state state;
        pt_flags flags;
        union ccb saved_ccb;
};

static  d_open_t        ptopen;
static  d_close_t       ptclose;
static  d_strategy_t    ptstrategy;
static  periph_init_t   ptinit;
static  void            ptasync(void *callback_arg, u_int32_t code,
                                struct cam_path *path, void *arg);
static  periph_ctor_t   ptctor;
static  periph_oninv_t  ptoninvalidate;
static  periph_dtor_t   ptdtor;
static  periph_start_t  ptstart;
static  void            ptdone(struct cam_periph *periph,
                               union ccb *done_ccb);
static  int             pterror(union ccb *ccb, u_int32_t cam_flags,
                                u_int32_t sense_flags);

void    scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries,
                          void (*cbfcnp)(struct cam_periph *, union ccb *),
                          u_int tag_action, int readop, u_int byte2,
                          u_int32_t xfer_len, u_int8_t *data_ptr,
                          u_int8_t sense_len, u_int32_t timeout);

static struct periph_driver ptdriver =
{
        ptinit, "pt",
        TAILQ_HEAD_INITIALIZER(ptdriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, ptdriver);

#define PT_CDEV_MAJOR   61
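
/*
 * Character device entry points.  A write(2) on the device becomes a SCSI
 * SEND command and a read(2) becomes a RECEIVE; physread()/physwrite()
 * hand each request to ptstrategy() via physio().  (A node name such as
 * /dev/pt0 is illustrative; it depends on how the device nodes are made.)
 */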
static struct cdevsw pt_cdevsw = {
        /* open */      ptopen,
        /* close */     ptclose,
        /* read */      physread,
        /* write */     physwrite,
        /* ioctl */     noioctl,
        /* stop */      nostop,
        /* reset */     noreset,
        /* devtotty */  nodevtotty,
        /* poll */      nopoll,
        /* mmap */      nommap,
        /* strategy */  ptstrategy,
        /* name */      "pt",
        /* parms */     noparms,
        /* maj */       PT_CDEV_MAJOR,
        /* dump */      nodump,
        /* psize */     nopsize,
        /* flags */     0,
        /* maxio */     0,
        /* bmaj */      -1
};

static struct extend_array *ptperiphs;

static int
ptopen(dev_t dev, int flags, int fmt, struct proc *p)
{
        struct cam_periph *periph;
        struct pt_softc *softc;
        int unit;
        int error;
        int s;

        unit = minor(dev);
        periph = cam_extend_get(ptperiphs, unit);
        if (periph == NULL)
                return (ENXIO);

        softc = (struct pt_softc *)periph->softc;

        s = splsoftcam();
        if (softc->flags & PT_FLAG_DEVICE_INVALID) {
                splx(s);
                return (ENXIO);
        }

        CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
                  ("ptopen: dev=0x%x (unit %d)\n", dev, unit));

        if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
                splx(s);
                return (error); /* error code from tsleep */
        }

        splx(s);

        if ((softc->flags & PT_FLAG_OPEN) == 0) {
                if (cam_periph_acquire(periph) != CAM_REQ_CMP)
                        error = ENXIO;
                else
                        softc->flags |= PT_FLAG_OPEN;
        } else
                error = EBUSY;

        cam_periph_unlock(periph);
        return (error);
}

static int
ptclose(dev_t dev, int flag, int fmt, struct proc *p)
{
        struct cam_periph *periph;
        struct pt_softc *softc;
        int unit;
        int error;

        unit = minor(dev);
        periph = cam_extend_get(ptperiphs, unit);
        if (periph == NULL)
                return (ENXIO);

        softc = (struct pt_softc *)periph->softc;

        if ((error = cam_periph_lock(periph, PRIBIO)) != 0)
                return (error); /* error code from tsleep */

        softc->flags &= ~PT_FLAG_OPEN;
        cam_periph_unlock(periph);
        cam_periph_release(periph);
        return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
ptstrategy(struct buf *bp)
{
        struct cam_periph *periph;
        struct pt_softc *softc;
        u_int unit;
        int s;

        unit = minor(bp->b_dev);
        periph = cam_extend_get(ptperiphs, unit);
        if (periph == NULL) {
                bp->b_error = ENXIO;
                goto bad;
        }
        softc = (struct pt_softc *)periph->softc;

        /*
         * Mask interrupts so that the device cannot be invalidated until
         * after we are in the queue.  Otherwise, we might not properly
         * clean up one of the buffers.
         */
        s = splbio();

        /*
         * If the device has been made invalid, error out
         */
        if ((softc->flags & PT_FLAG_DEVICE_INVALID)) {
                splx(s);
                bp->b_error = ENXIO;
                goto bad;
        }

        /*
         * Place it in the queue of pending work for this device
         */
        bufq_insert_tail(&softc->buf_queue, bp);

        splx(s);

        /*
         * Schedule ourselves for performing the work.
         */
        xpt_schedule(periph, /* XXX priority */1);

        return;
bad:
        bp->b_flags |= B_ERROR;

        /*
         * Correctly set the buf to indicate a completed xfer
         */
        bp->b_resid = bp->b_bcount;
        biodone(bp);
}

static void
ptinit(void)
{
        cam_status status;
        struct cam_path *path;

        /*
         * Create our extend array for storing the devices we attach to.
         */
        ptperiphs = cam_extend_new();
        if (ptperiphs == NULL) {
                printf("pt: Failed to alloc extend array!\n");
                return;
        }

        /*
         * Install a global async callback.  This callback will
         * receive async callbacks like "new device found".
         */
        status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
                                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

        if (status == CAM_REQ_CMP) {
                struct ccb_setasync csa;

                xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
                csa.ccb_h.func_code = XPT_SASYNC_CB;
                csa.event_enable = AC_FOUND_DEVICE;
                csa.callback = ptasync;
                csa.callback_arg = NULL;
                xpt_action((union ccb *)&csa);
                status = csa.ccb_h.status;
                xpt_free_path(path);
        }

        if (status != CAM_REQ_CMP) {
                printf("pt: Failed to attach master async callback "
                       "due to status 0x%x!\n", status);
        } else {
                /* If we were successful, register our devsw */
                cdevsw_add(&pt_cdevsw);
        }
}
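
/*
 * Periph constructor, invoked through cam_periph_alloc() when ptasync()
 * reports a new processor-type device: allocate and initialize the softc,
 * register a devstat entry, enable reset/device-loss async callbacks, and
 * announce the device.
 */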
static cam_status
ptctor(struct cam_periph *periph, void *arg)
{
        struct pt_softc *softc;
        struct ccb_setasync csa;
        struct ccb_getdev *cgd;

        cgd = (struct ccb_getdev *)arg;
        if (periph == NULL) {
                printf("ptregister: periph was NULL!!\n");
                return (CAM_REQ_CMP_ERR);
        }

        if (cgd == NULL) {
                printf("ptregister: no getdev CCB, can't register device\n");
                return (CAM_REQ_CMP_ERR);
        }

        softc = (struct pt_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);

        if (softc == NULL) {
                printf("ptregister: Unable to probe new device. "
                       "Unable to allocate softc\n");
                return (CAM_REQ_CMP_ERR);
        }

        bzero(softc, sizeof(*softc));
        LIST_INIT(&softc->pending_ccbs);
        softc->state = PT_STATE_NORMAL;
        bufq_init(&softc->buf_queue);

        periph->softc = softc;

        cam_extend_set(ptperiphs, periph->unit_number, periph);

        /*
         * Processor devices have no notion of a blocksize, so register
         * the devstat entry with DEVSTAT_NO_BLOCKSIZE.
         */
        devstat_add_entry(&softc->device_stats, "pt",
                          periph->unit_number, 0,
                          DEVSTAT_NO_BLOCKSIZE,
                          cgd->pd_type | DEVSTAT_TYPE_IF_SCSI,
                          DEVSTAT_PRIORITY_OTHER);

        /*
         * Add async callbacks for bus reset and
         * bus device reset calls.  I don't bother
         * checking if this fails as, in most cases,
         * the system will function just fine without
         * them and the only alternative would be to
         * not attach the device on failure.
         */
        xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
        csa.callback = ptasync;
        csa.callback_arg = periph;
        xpt_action((union ccb *)&csa);

        /* Tell the user we've attached to the device */
        xpt_announce_periph(periph, NULL);

        return (CAM_REQ_CMP);
}

static void
ptoninvalidate(struct cam_periph *periph)
{
        int s;
        struct pt_softc *softc;
        struct buf *q_bp;
        struct ccb_setasync csa;

        softc = (struct pt_softc *)periph->softc;

        /*
         * De-register any async callbacks.
         */
        xpt_setup_ccb(&csa.ccb_h, periph->path,
                      /* priority */ 5);
        csa.ccb_h.func_code = XPT_SASYNC_CB;
        csa.event_enable = 0;
        csa.callback = ptasync;
        csa.callback_arg = periph;
        xpt_action((union ccb *)&csa);

        softc->flags |= PT_FLAG_DEVICE_INVALID;

        /*
         * Although the oninvalidate() routines are always called at
         * splsoftcam, we need to be at splbio() here to keep the buffer
         * queue from being modified while we traverse it.
         */
        s = splbio();

        /*
         * Return all queued I/O with ENXIO.
         * XXX Handle any transactions queued to the card
         *     with XPT_ABORT_CCB.
         */
        while ((q_bp = bufq_first(&softc->buf_queue)) != NULL) {
                bufq_remove(&softc->buf_queue, q_bp);
                q_bp->b_resid = q_bp->b_bcount;
                q_bp->b_error = ENXIO;
                q_bp->b_flags |= B_ERROR;
                biodone(q_bp);
        }

        splx(s);

        xpt_print_path(periph->path);
        printf("lost device\n");
}

static void
ptdtor(struct cam_periph *periph)
{
        struct pt_softc *softc;

        softc = (struct pt_softc *)periph->softc;

        devstat_remove_entry(&softc->device_stats);

        cam_extend_release(ptperiphs, periph->unit_number);
        xpt_print_path(periph->path);
        printf("removing device entry\n");
        free(softc, M_DEVBUF);
}

static void
ptasync(void *callback_arg, u_int32_t code, struct cam_path *path, void *arg)
{
        struct cam_periph *periph;

        periph = (struct cam_periph *)callback_arg;
        switch (code) {
        case AC_FOUND_DEVICE:
        {
                struct ccb_getdev *cgd;
                cam_status status;

                cgd = (struct ccb_getdev *)arg;

                if (cgd->pd_type != T_PROCESSOR)
                        break;

                /*
                 * Allocate a peripheral instance for
                 * this device and start the probe
                 * process.
                 */
                status = cam_periph_alloc(ptctor, ptoninvalidate, ptdtor,
                                          ptstart, "pt", CAM_PERIPH_BIO,
                                          cgd->ccb_h.path, ptasync,
                                          AC_FOUND_DEVICE, cgd);

                if (status != CAM_REQ_CMP
                 && status != CAM_REQ_INPROG)
                        printf("ptasync: Unable to attach to new device "
                               "due to status 0x%x\n", status);
                break;
        }
        case AC_SENT_BDR:
        case AC_BUS_RESET:
        {
                struct pt_softc *softc;
                struct ccb_hdr *ccbh;
                int s;

                softc = (struct pt_softc *)periph->softc;
                s = splsoftcam();
                /*
                 * Don't fail on the expected unit attention
                 * that will occur.
                 */
                softc->flags |= PT_FLAG_RETRY_UA;
                for (ccbh = LIST_FIRST(&softc->pending_ccbs);
                     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
                        ccbh->ccb_state |= PT_CCB_RETRY_UA;
                splx(s);
                /* FALLTHROUGH */
        }
        default:
                cam_periph_async(periph, code, path, arg);
                break;
        }
}
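
/*
 * Called by the XPT when a CCB is available for this peripheral.  Hand the
 * CCB to a waiting caller if one is pending; otherwise turn the next queued
 * buf into a SEND/RECEIVE request via scsi_send_receive() and dispatch it
 * with xpt_action().
 */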
static void
ptstart(struct cam_periph *periph, union ccb *start_ccb)
{
        struct pt_softc *softc;
        struct buf *bp;
        int s;

        softc = (struct pt_softc *)periph->softc;

        /*
         * See if there is a buf with work for us to do.
         */
        s = splbio();
        bp = bufq_first(&softc->buf_queue);
        if (periph->immediate_priority <= periph->pinfo.priority) {
                CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
                                ("queuing for immediate ccb\n"));
                start_ccb->ccb_h.ccb_state = PT_CCB_WAITING;
                SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
                                  periph_links.sle);
                periph->immediate_priority = CAM_PRIORITY_NONE;
                splx(s);
                wakeup(&periph->ccb_list);
        } else if (bp == NULL) {
                splx(s);
                xpt_release_ccb(start_ccb);
        } else {
                int oldspl;

                bufq_remove(&softc->buf_queue, bp);

                devstat_start_transaction(&softc->device_stats);

                scsi_send_receive(&start_ccb->csio,
                                  /*retries*/4,
                                  ptdone,
                                  MSG_SIMPLE_Q_TAG,
                                  bp->b_flags & B_READ,
                                  /*byte2*/0,
                                  bp->b_bcount,
                                  bp->b_data,
                                  /*sense_len*/SSD_FULL_SIZE,
                                  /*timeout*/10000);

                start_ccb->ccb_h.ccb_state = PT_CCB_BUFFER_IO;

                /*
                 * Block out any asynchronous callbacks
                 * while we touch the pending ccb list.
                 */
                oldspl = splcam();
                LIST_INSERT_HEAD(&softc->pending_ccbs, &start_ccb->ccb_h,
                                 periph_links.le);
                splx(oldspl);

                start_ccb->ccb_h.ccb_bp = bp;
                bp = bufq_first(&softc->buf_queue);
                splx(s);

                xpt_action(start_ccb);

                if (bp != NULL) {
                        /* Have more work to do, so ensure we stay scheduled */
                        xpt_schedule(periph, /* XXX priority */1);
                }
        }
}
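
/*
 * Completion handler: on success, finish the buf; on failure, run the CCB
 * through pterror()/cam_periph_error(), possibly invalidating the device
 * and failing any queued I/O before completing the buf with an error.
 */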
static void
ptdone(struct cam_periph *periph, union ccb *done_ccb)
{
        struct pt_softc *softc;
        struct ccb_scsiio *csio;

        softc = (struct pt_softc *)periph->softc;
        csio = &done_ccb->csio;
        switch (csio->ccb_h.ccb_state) {
        case PT_CCB_BUFFER_IO:
        case PT_CCB_BUFFER_IO_UA:
        {
                struct buf *bp;
                int oldspl;

                bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
                if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
                        int error;
                        int s;
                        int sf;

                        if ((csio->ccb_h.ccb_state & PT_CCB_RETRY_UA) != 0)
                                sf = SF_RETRY_UA;
                        else
                                sf = 0;

                        sf |= SF_RETRY_SELTO;

                        if ((error = pterror(done_ccb, 0, sf)) == ERESTART) {
                                /*
                                 * A retry was scheduled, so
                                 * just return.
                                 */
                                return;
                        }
                        if (error != 0) {
                                struct buf *q_bp;

                                s = splbio();

                                if (error == ENXIO) {
                                        /*
                                         * Catastrophic error.  Mark our device
                                         * as invalid.
                                         */
                                        xpt_print_path(periph->path);
                                        printf("Invalidating device\n");
                                        softc->flags |= PT_FLAG_DEVICE_INVALID;
                                }

                                /*
                                 * Return all queued I/O with EIO, so that
                                 * the client can retry these I/Os in the
                                 * proper order should it attempt to recover.
                                 */
                                while ((q_bp = bufq_first(&softc->buf_queue))
                                        != NULL) {
                                        bufq_remove(&softc->buf_queue, q_bp);
                                        q_bp->b_resid = q_bp->b_bcount;
                                        q_bp->b_error = EIO;
                                        q_bp->b_flags |= B_ERROR;
                                        biodone(q_bp);
                                }
                                splx(s);
                                bp->b_error = error;
                                bp->b_resid = bp->b_bcount;
                                bp->b_flags |= B_ERROR;
                        } else {
                                bp->b_resid = csio->resid;
                                bp->b_error = 0;
                                if (bp->b_resid != 0) {
                                        /* Short transfer ??? */
                                        bp->b_flags |= B_ERROR;
                                }
                        }
                        if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
                                cam_release_devq(done_ccb->ccb_h.path,
                                                 /*relsim_flags*/0,
                                                 /*reduction*/0,
                                                 /*timeout*/0,
                                                 /*getcount_only*/0);
                } else {
                        bp->b_resid = csio->resid;
                        if (bp->b_resid != 0)
                                bp->b_flags |= B_ERROR;
                }

                /*
                 * Block out any asynchronous callbacks
                 * while we touch the pending ccb list.
                 */
                oldspl = splcam();
                LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
                splx(oldspl);

                devstat_end_transaction(&softc->device_stats,
                                        bp->b_bcount - bp->b_resid,
                                        done_ccb->csio.tag_action & 0xf,
                                        (bp->b_flags & B_READ) ? DEVSTAT_READ
                                                               : DEVSTAT_WRITE);

                biodone(bp);
                break;
        }
        case PT_CCB_WAITING:
                /* Caller will release the CCB */
                wakeup(&done_ccb->ccb_h.cbfcnp);
                return;
        }
        xpt_release_ccb(done_ccb);
}

static int
pterror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
        struct pt_softc *softc;
        struct cam_periph *periph;

        periph = xpt_path_periph(ccb->ccb_h.path);
        softc = (struct pt_softc *)periph->softc;

        return (cam_periph_error(ccb, cam_flags, sense_flags,
                                 &softc->saved_ccb));
}

void
scsi_send_receive(struct ccb_scsiio *csio, u_int32_t retries,
                  void (*cbfcnp)(struct cam_periph *, union ccb *),
                  u_int tag_action, int readop, u_int byte2,
                  u_int32_t xfer_len, u_int8_t *data_ptr, u_int8_t sense_len,
                  u_int32_t timeout)
{
        struct scsi_send_receive *scsi_cmd;

        scsi_cmd = (struct scsi_send_receive *)&csio->cdb_io.cdb_bytes;
        scsi_cmd->opcode = readop ? RECEIVE : SEND;
        scsi_cmd->byte2 = byte2;
        scsi_ulto3b(xfer_len, scsi_cmd->xfer_len);
        scsi_cmd->control = 0;

        cam_fill_csio(csio,
                      retries,
                      cbfcnp,
                      /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
                      tag_action,
                      data_ptr,
                      xfer_len,
                      sense_len,
                      sizeof(*scsi_cmd),
                      timeout);
}