/*
 * Implementation of SCSI Direct Access Peripheral driver for CAM.
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $Id$
 */

#include <sys/param.h>
#include <sys/queue.h>
#ifdef KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#endif
#include <sys/types.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/dkbad.h>
#include <sys/disklabel.h>
#include <sys/diskslice.h>
#include <sys/malloc.h>
#include <sys/conf.h>

#ifdef KERNEL
#include <machine/cons.h>	/* For cncheckc */
#include <machine/md_var.h>	/* For Maxmem */

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/pmap.h>
#endif

#ifndef KERNEL
#include <stdio.h>
#include <string.h>
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_debug.h>

#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>
#include <cam/scsi/scsi_da.h>

#ifdef KERNEL

typedef enum {
	DA_STATE_PROBE,
	DA_STATE_NORMAL
} da_state;

typedef enum {
	DA_FLAG_PACK_INVALID	= 0x001,
	DA_FLAG_NEW_PACK	= 0x002,
	DA_FLAG_PACK_LOCKED	= 0x004,
	DA_FLAG_PACK_REMOVABLE	= 0x008,
	DA_FLAG_TAGGED_QUEUING	= 0x010,
	DA_FLAG_NEED_OTAG	= 0x020,
	DA_FLAG_WENT_IDLE	= 0x040,
	DA_FLAG_RETRY_UA	= 0x080,
	DA_FLAG_OPEN		= 0x100
} da_flags;

typedef enum {
	DA_CCB_PROBE		= 0x01,
	DA_CCB_BUFFER_IO	= 0x02,
	DA_CCB_WAITING		= 0x03,
	DA_CCB_DUMP		= 0x04,
	DA_CCB_TYPE_MASK	= 0x0F,
	DA_CCB_RETRY_UA		= 0x10
} da_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int16_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int32_t sectors;	/* Total number of sectors */
};

struct da_softc {
	struct	buf_queue_head buf_queue;
	struct	devstat device_stats;
	SLIST_ENTRY(da_softc) links;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	da_state state;
	da_flags flags;
	int	ordered_tag_count;
	struct	disk_params params;
	struct	diskslices *dk_slices;	/* virtual drives */
	union	ccb saved_ccb;
#ifdef DEVFS
	void	*b_devfs_token;
	void	*c_devfs_token;
	void	*ctl_devfs_token;
#endif
};

static	d_open_t	daopen;
static	d_read_t	daread;
static	d_write_t	dawrite;
static	d_close_t	daclose;
static	d_strategy_t	dastrategy;
static	d_ioctl_t	daioctl;
static	d_dump_t	dadump;
static	d_psize_t	dasize;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	void		dasetgeom(struct cam_periph *periph,
				  struct scsi_read_capacity_data *rdcap);
static	timeout_t	dasendorderedtag;

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case, where a starved transaction starts during an interval that
 * passes the "don't send an ordered tag" test, so that it takes
 * us two intervals to determine that a tag must be sent.
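 *
 * For example (illustrative arithmetic): with the default 60 second
 * timeout and an interval divisor of 4, dasendorderedtag runs every
 * 15 seconds.  In the worst case two intervals (30 seconds) elapse
 * before the ordered tag goes out, which still leaves the remaining
 * 30 seconds, half of the timeout, for the starved transaction to
 * complete.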
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, dadriver);

#define DA_CDEV_MAJOR 13
#define DA_BDEV_MAJOR 4

/* For 2.2-stable support */
#ifndef D_DISK
#define D_DISK 0
#endif

static struct cdevsw da_cdevsw =
{
	/*d_open*/	daopen,
	/*d_close*/	daclose,
	/*d_read*/	daread,
	/*d_write*/	dawrite,
	/*d_ioctl*/	daioctl,
	/*d_stop*/	nostop,
	/*d_reset*/	noreset,
	/*d_devtotty*/	nodevtotty,
	/*d_poll*/	seltrue,
	/*d_mmap*/	nommap,
	/*d_strategy*/	dastrategy,
	/*d_name*/	"da",
	/*d_spare*/	NULL,
	/*d_maj*/	-1,
	/*d_dump*/	dadump,
	/*d_psize*/	dasize,
	/*d_flags*/	D_DISK,
	/*d_maxio*/	0,
	/*b_maj*/	-1
};

static SLIST_HEAD(,da_softc) softc_list;
static struct extend_array *daperiphs;

static int
daopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	struct disklabel label;
	int unit;
	int part;
	int error;

	unit = dkunit(dev);
	part = dkpart(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("daopen: dev=0x%x (unit %d, partition %d)\n", dev,
	     unit, part));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
		return (error); /* error code from tsleep */
	}

	if ((softc->flags & DA_FLAG_OPEN) == 0) {
		if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
			cam_periph_unlock(periph);
			return (ENXIO);
		}
		softc->flags |= DA_FLAG_OPEN;
	}

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {

		if (softc->dk_slices != NULL) {
			/*
			 * If any partition is open, but the disk has
			 * been invalidated, disallow further opens.
			 */
			if (dsisopen(softc->dk_slices)) {
				cam_periph_unlock(periph);
				return (ENXIO);
			}

			/* Invalidate our pack information */
			dsgone(&softc->dk_slices);
		}
		softc->flags &= ~DA_FLAG_PACK_INVALID;
	}

	/* Do a read capacity */
	{
		struct scsi_read_capacity_data *rcap;
		union ccb *ccb;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_WAITOK);

		ccb = cam_periph_getccb(periph, /*priority*/1);
		scsi_read_capacity(&ccb->csio,
				   /*retries*/1,
				   /*cbfcnp*/dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/20000);
		ccb->ccb_h.ccb_bp = NULL;

		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
					  /*sense_flags*/SF_RETRY_UA,
					  &softc->device_stats);

		xpt_release_ccb(ccb);

		if (error == 0) {
			dasetgeom(periph, rcap);
		}

		free(rcap, M_TEMP);
	}

	if (error == 0) {
		/* Build label for whole disk. */
		bzero(&label, sizeof(label));
		label.d_type = DTYPE_SCSI;
		label.d_secsize = softc->params.secsize;
		label.d_nsectors = softc->params.secs_per_track;
		label.d_ntracks = softc->params.heads;
		label.d_ncylinders = softc->params.cylinders;
		label.d_secpercyl = softc->params.heads
				  * softc->params.secs_per_track;
		label.d_secperunit = softc->params.sectors;

		if ((dsisopen(softc->dk_slices) == 0)
		 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
			daprevent(periph, PR_PREVENT);
		}

		/* Initialize slice tables. */
		error = dsopen("da", dev, fmt, 0, &softc->dk_slices, &label,
			       dastrategy, (ds_setgeom_t *)NULL,
			       &da_cdevsw);

		/*
		 * Check to see whether or not the blocksize is set yet.
		 * If it isn't, set it and then clear the blocksize
		 * unavailable flag for the device statistics.
		 */
		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
			softc->device_stats.block_size = softc->params.secsize;
			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
		}
	}

	if (error != 0) {
		if ((dsisopen(softc->dk_slices) == 0)
		 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) {
			daprevent(periph, PR_ALLOW);
		}
	}
	cam_periph_unlock(periph);
	return (error);
}

static int
daclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct	cam_periph *periph;
	struct	da_softc *softc;
	union	ccb *ccb;
	int	unit;
	int	error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
		return (error); /* error code from tsleep */
	}

	dsclose(dev, fmt, softc->dk_slices);

	ccb = cam_periph_getccb(periph, /*priority*/1);

	scsi_synchronize_cache(&ccb->csio,
			       /*retries*/1,
			       /*cbfcnp*/dadone,
			       MSG_SIMPLE_Q_TAG,
			       /*begin_lba*/0, /* Cover the whole disk */
			       /*lb_count*/0,
			       SSD_FULL_SIZE,
			       5 * 60 * 1000);

	/* Ignore any errors */
	cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
			  /*sense_flags*/0, &softc->device_stats);

	xpt_release_ccb(ccb);

	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
		daprevent(periph, PR_ALLOW);
		/*
		 * If we've got removable media, mark the blocksize as
		 * unavailable, since it could change when new media is
		 * inserted.
		 */
		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
	}

	softc->flags &= ~DA_FLAG_OPEN;
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

static int
daread(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(dastrategy, NULL, dev, 1, minphys, uio));
}

static int
dawrite(dev_t dev, struct uio *uio, int ioflag)
{
	return (physio(dastrategy, NULL, dev, 0, minphys, uio));
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
dastrategy(struct buf *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int  unit;
	u_int  part;
	int    s;

	unit = dkunit(bp->b_dev);
	part = dkpart(bp->b_dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		bp->b_error = ENXIO;
		goto bad;
	}
	softc = (struct da_softc *)periph->softc;
#if 0
	/*
	 * check it's not too big a transfer for our adapter
	 */
	scsi_minphys(bp, &sd_switch);
#endif

	/*
	 * Do bounds checking, adjust the transfer, and set b_cylin
	 * and b_pblkno.
	 */
	if (dscheck(bp, softc->dk_slices) <= 0)
		goto done;

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		splx(s);
		bp->b_error = ENXIO;
		goto bad;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bufqdisksort(&softc->buf_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
bad:
	bp->b_flags |= B_ERROR;
done:

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return;
}

/* For 2.2-stable support */
#ifndef ENOIOCTL
#define ENOIOCTL -1
#endif

static int
daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));

	if (cmd == DIOCSBAD)
		return (EINVAL);	/* XXX */

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
		return (error); /* error code from tsleep */
	}

	error = dsioctl("da", dev, cmd, addr, flag, &softc->dk_slices,
			dastrategy, (ds_setgeom_t *)NULL);

	if (error == ENOIOCTL)
		error = cam_periph_ioctl(periph, cmd, addr, daerror);

	cam_periph_unlock(periph);

	return (error);
}

static int
dadump(dev_t dev)
{
	struct	    cam_periph *periph;
	struct	    da_softc *softc;
	struct	    disklabel *lp;
	u_int	    unit;
	u_int	    part;
	long	    num;	/* number of sectors to write */
	long	    blkoff;
	long	    blknum;
	long	    blkcnt;
	char	    *addr;
	static	int dadoingadump = 0;
	struct	    ccb_scsiio csio;

	/* toss any characters present prior to dump */
	while (cncheckc() != -1)
		;

	unit = dkunit(dev);
	part = dkpart(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		return (ENXIO);
	}
	softc = (struct da_softc *)periph->softc;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0
	 || (softc->dk_slices == NULL)
	 || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL)
		return (ENXIO);

	/* Size of memory to dump, in disk sectors. */
	/* XXX Fix up for non DEV_BSIZE sectors!!! */
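	/*
	 * Maxmem counts physical memory in pages, so Maxmem * PAGE_SIZE
	 * is the number of bytes to dump, and dividing by the sector
	 * size converts that to a sector count.
	 */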
	num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize;

	blkoff = lp->d_partitions[part].p_offset;
	blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset;

	/* check transfer bounds against partition size */
	if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size))
		return (EINVAL);

	if (dadoingadump != 0)
		return (EFAULT);

	dadoingadump = 1;

	blknum = dumplo + blkoff;
	blkcnt = PAGE_SIZE / softc->params.secsize;

	addr = (char *)0;	/* starting address */

	while (num > 0) {

		if (is_physical_memory((vm_offset_t)addr)) {
			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
				   trunc_page(addr), VM_PROT_READ, TRUE);
		} else {
			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
				   trunc_page(0), VM_PROT_READ, TRUE);
		}

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/1,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/FALSE,
				/*byte2*/0,
				/*minimum_cmd_size*/ 6,
				blknum,
				blkcnt,
				/*data_ptr*/CADDR1,
				/*dxfer_len*/blkcnt * softc->params.secsize,
				/*sense_len*/SSD_FULL_SIZE,
				DA_DEFAULT_TIMEOUT * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error. "
			       "status == 0x%x, scsi status == 0x%x\n",
			       csio.ccb_h.status, csio.scsi_status);
			return (EIO);
		}

		if ((unsigned)addr % (1024 * 1024) == 0) {
#ifdef HW_WDOG
			if (wdog_tickler)
				(*wdog_tickler)();
#endif /* HW_WDOG */
			/* Count in MB of data left to write */
			printf("%ld ", (num * softc->params.secsize)
				     / (1024 * 1024));
		}

		/* update block count */
		num -= blkcnt;
		blknum += blkcnt;
		addr += blkcnt * softc->params.secsize;

		/* operator aborting dump? */
		if (cncheckc() != -1)
			return (EINTR);
	}
	return (0);
}

static int
dasize(dev_t dev)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = cam_extend_get(daperiphs, dkunit(dev));
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	return (dssize(dev, &softc->dk_slices, daopen, daclose));
}

static void
dainit(void)
{
	cam_status status;
	struct cam_path *path;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	daperiphs = cam_extend_new();
	SLIST_INIT(&softc_list);
	if (daperiphs == NULL) {
		printf("da: Failed to alloc extend array!\n");
		return;
	}

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE;
		csa.callback = daasync;
		csa.callback_arg = NULL;
		xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
		xpt_free_path(path);
	}

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {
		/* If we were successful, register our devsw */
		cdevsw_add_generic(DA_BDEV_MAJOR, DA_CDEV_MAJOR, &da_cdevsw);

		/*
		 * Schedule a periodic event to occasionally send an
		 * ordered tag to a device.
		 */
		timeout(dasendorderedtag, NULL,
			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
	}
}

static void
dacleanup(struct cam_periph *periph)
{
	cam_extend_release(daperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	free(periph->softc, M_DEVBUF);
}

static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;

		if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, dacleanup, dastart,
					  "da", CAM_PERIPH_BIO, cgd->ccb_h.path,
					  daasync, AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
			       "due to status 0x%x\n", status);
		break;
	}
	case AC_LOST_DEVICE:
	{
		int s;
		struct da_softc *softc;
		struct buf *q_bp;
		struct ccb_setasync csa;

		softc = (struct da_softc *)periph->softc;

		/*
		 * Ensure that no other async callbacks that
		 * might affect this peripheral can come through.
		 */
		s = splcam();

		/*
		 * De-register any async callbacks.
		 */
		xpt_setup_ccb(&csa.ccb_h, periph->path,
			      /* priority */ 5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = 0;
		csa.callback = daasync;
		csa.callback_arg = periph;
		xpt_action((union ccb *)&csa);

		softc->flags |= DA_FLAG_PACK_INVALID;

		/*
		 * Return all queued I/O with ENXIO.
		 * XXX Handle any transactions queued to the card
		 *     with XPT_ABORT_CCB.
		 */
		while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
			bufq_remove(&softc->buf_queue, q_bp);
			q_bp->b_resid = q_bp->b_bcount;
			q_bp->b_error = ENXIO;
			q_bp->b_flags |= B_ERROR;
			biodone(q_bp);
		}
		devstat_remove_entry(&softc->device_stats);

		SLIST_REMOVE(&softc_list, softc, da_softc, links);

		xpt_print_path(periph->path);
		printf("lost device\n");

		splx(s);

		cam_periph_invalidate(periph);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct da_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct da_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
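		 * The reset causes the target to report a unit
		 * attention on the next command it sees, so flag the
		 * softc and every pending CCB; dadone then passes
		 * SF_RETRY_UA to daerror for the flagged commands so
		 * they are retried rather than failed.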
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		splx(s);
		break;
	}
	case AC_TRANSFER_NEG:
	case AC_SCSI_AEN:
	case AC_UNSOL_RESEL:
	default:
		break;
	}
}

static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	int s;
	struct da_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("daregister: periph was NULL!!\n");
		return (CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return (CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE;
	bufq_init(&softc->buf_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
		softc->flags |= DA_FLAG_TAGGED_QUEUING;

	periph->softc = softc;

	cam_extend_set(daperiphs, periph->unit_number, periph);
	/*
	 * Block our timeout handler while we
	 * add this softc to the dev list.
	 */
	s = splsoftclock();
	SLIST_INSERT_HEAD(&softc_list, softc, links);
	splx(s);

	/*
	 * The DA driver supports a blocksize, but
	 * we don't know the blocksize until we do
	 * a read capacity.  So, set a flag to
	 * indicate that the blocksize is
	 * unavailable right now.  We'll clear the
	 * flag as soon as we've done a read capacity.
	 */
	devstat_add_entry(&softc->device_stats, "da",
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
	/*
	 * Lock this peripheral until we are setup.
	 * This first call can't block
	 */
	(void)cam_periph_lock(periph, PRIBIO);
	xpt_schedule(periph, /*priority*/5);

	return (CAM_REQ_CMP);
}

static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct buf *bp;
		int s;

		/*
		 * See if there is a buf with work for us to do..
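		 * An immediate CCB request (a cam_periph_getccb caller
		 * sleeping on periph->ccb_list) takes priority over
		 * buffer I/O; if neither is pending, release the CCB.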
		 */
		s = splbio();
		bp = bufq_first(&softc->buf_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			splx(s);
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			splx(s);
			xpt_release_ccb(start_ccb);
		} else {
			int oldspl;
			u_int8_t tag_code;

			bufq_remove(&softc->buf_queue, bp);

			devstat_start_transaction(&softc->device_stats);

			if ((bp->b_flags & B_ORDERED) != 0
			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~DA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = MSG_ORDERED_Q_TAG;
			} else {
				tag_code = MSG_SIMPLE_Q_TAG;
			}
			scsi_read_write(&start_ccb->csio,
					/*retries*/4,
					dadone,
					tag_code,
					bp->b_flags & B_READ,
					/*byte2*/0,
					/*minimum_cmd_size*/ 6,
					bp->b_pblkno,
					bp->b_bcount / softc->params.secsize,
					bp->b_data,
					bp->b_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					DA_DEFAULT_TIMEOUT * 1000);
			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;

			/*
			 * Block out any asynchronous callbacks
			 * while we touch the pending ccb list.
			 */
			oldspl = splcam();
			LIST_INSERT_HEAD(&softc->pending_ccbs,
					 &start_ccb->ccb_h, periph_links.le);
			splx(oldspl);

			/* We expect a unit attention from this device */
			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
				softc->flags &= ~DA_FLAG_RETRY_UA;
			}

			start_ccb->ccb_h.ccb_bp = bp;
			bp = bufq_first(&softc->buf_queue);
			splx(s);

			xpt_action(start_ccb);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
		break;
	}
	case DA_STATE_PROBE:
	{
		struct ccb_scsiio *csio;
		struct scsi_read_capacity_data *rcap;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_NOWAIT);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity(csio,
				   /*retries*/4,
				   dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
		xpt_action(start_ccb);
		break;
	}
	}
}

static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
	case DA_CCB_BUFFER_IO:
	{
		struct buf *bp;
		int oldspl;

		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct buf *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack
					 * as invalid.
					 */
					/* XXX See if this is really a media
					 *     change first.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
				}

				/*
				 * Return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bufq_first(&softc->buf_queue))
					!= NULL) {
					bufq_remove(&softc->buf_queue, q_bp);
					q_bp->b_resid = q_bp->b_bcount;
					q_bp->b_error = EIO;
					q_bp->b_flags |= B_ERROR;
					biodone(q_bp);
				}
				splx(s);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_ERROR;
			} else {
				bp->b_resid = csio->resid;
				bp->b_error = 0;
				if (bp->b_resid != 0) {
					/* Short transfer ??? */
					bp->b_flags |= B_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			bp->b_resid = csio->resid;
			if (csio->resid > 0)
				bp->b_flags |= B_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		devstat_end_transaction(&softc->device_stats,
					bp->b_bcount - bp->b_resid,
					done_ccb->csio.tag_action & 0xf,
					(bp->b_flags & B_READ) ? DEVSTAT_READ
							       : DEVSTAT_WRITE);

		if (softc->device_stats.busy_count == 0)
			softc->flags |= DA_FLAG_WENT_IDLE;

		biodone(bp);
		break;
	}
	case DA_CCB_PROBE:
	{
		struct	scsi_read_capacity_data *rdcap;
		char	announce_buf[80];

		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct disk_params *dp;

			dasetgeom(periph, rdcap);
			dp = &softc->params;
			sprintf(announce_buf,
				"%ldMB (%d %d byte sectors: %dH %dS/T %dC)",
				dp->sectors / ((1024L * 1024L) / dp->secsize),
				dp->sectors, dp->secsize, dp->heads,
				dp->secs_per_track, dp->cylinders);
		} else {
			int	error;

			/*
			 * Retry any UNIT ATTENTION type errors.  They
			 * are expected at boot.
			 */
			error = daerror(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			} else if (error != 0) {
				struct scsi_sense_data *sense;
				int asc, ascq;
				int sense_key, error_code;
				int have_sense;
				cam_status status;
				struct ccb_getdev cgd;

				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);

				status = done_ccb->ccb_h.status;

				xpt_setup_ccb(&cgd.ccb_h,
					      done_ccb->ccb_h.path,
					      /* priority */ 1);
				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action((union ccb *)&cgd);

				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
				 || ((status & CAM_AUTOSNS_VALID) == 0))
					have_sense = FALSE;
				else
					have_sense = TRUE;

				if (have_sense) {
					sense = &csio->sense_data;
					scsi_extract_sense(sense, &error_code,
							   &sense_key,
							   &asc, &ascq);
				}
				/*
				 * With removable media devices, we expect
				 * 0x3a (Medium not present) errors, since not
				 * everyone leaves a disk in the drive.  If
				 * the error is anything else, though, we
				 * shouldn't attach.
				 */
				if ((have_sense) && (asc == 0x3a)
				 && (error_code == SSD_CURRENT_ERROR))
					sprintf(announce_buf,
						"Attempt to query device "
						"size failed: %s, %s",
						scsi_sense_key_text[sense_key],
						scsi_sense_desc(asc, ascq,
								&cgd.inq_data));
				else {
					/*
					 * If we have sense information, go
					 * ahead and print it out.
					 * Otherwise, just say that we
					 * couldn't attach.
					 */
					if ((have_sense) && (asc || ascq)
					 && (error_code == SSD_CURRENT_ERROR))
						sprintf(announce_buf,
							"fatal error: %s, %s "
							"-- failed to attach "
							"to device",
							scsi_sense_key_text[sense_key],
							scsi_sense_desc(asc, ascq,
									&cgd.inq_data));
					else
						sprintf(announce_buf,
							"fatal error, failed"
							" to attach to device");

					/*
					 * Just print out the error, not
					 * the full probe message, when we
					 * don't attach.
					 */
					printf("%s%d: %s\n",
					       periph->periph_name,
					       periph->unit_number,
					       announce_buf);
					scsi_sense_print(&done_ccb->csio);

					/*
					 * Free up resources.
					 */
					cam_extend_release(daperiphs,
							   periph->unit_number);
					cam_periph_invalidate(periph);
					periph = NULL;
				}
			}
		}
		free(rdcap, M_TEMP);
		if (periph != NULL) {
			xpt_announce_periph(periph, announce_buf);
			softc->state = DA_STATE_NORMAL;
			cam_periph_unlock(periph);
		}
		break;
	}
	case DA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case DA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	}
	xpt_release_ccb(done_ccb);
}

static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	return (cam_periph_error(ccb, cam_flags, sense_flags,
				 &softc->saved_ccb));
}

static void
daprevent(struct cam_periph *periph, int action)
{
	struct	da_softc *softc;
	union	ccb *ccb;
	int	error;

	softc = (struct da_softc *)periph->softc;

	if (((action == PR_ALLOW)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
	 || ((action == PR_PREVENT)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
		return;
	}

	ccb = cam_periph_getccb(periph, /*priority*/1);

	scsi_prevent(&ccb->csio,
		     /*retries*/1,
		     /*cbfcnp*/dadone,
		     MSG_SIMPLE_Q_TAG,
		     action,
		     SSD_FULL_SIZE,
		     5000);

	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
				  /*sense_flags*/0, &softc->device_stats);

	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~DA_FLAG_PACK_LOCKED;
		else
			softc->flags |= DA_FLAG_PACK_LOCKED;
	}

	xpt_release_ccb(ccb);
}

static void
dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data *rdcap)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = scsi_4btoul(rdcap->length);
	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb *)&ccg);
	dp->heads = ccg.heads;
	dp->secs_per_track = ccg.secs_per_track;
	dp->cylinders = ccg.cylinders;
}

#endif /* KERNEL */

void
scsi_read_write(struct ccb_scsiio *csio, u_int32_t retries,
		void (*cbfcnp)(struct cam_periph *, union ccb *),
		u_int8_t tag_action, int readop, u_int8_t byte2,
		int minimum_cmd_size, u_int32_t lba, u_int32_t block_count,
		u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		u_int32_t timeout)
{
	u_int8_t cdb_len;
	/*
	 * Use the smallest possible command to perform the operation
	 * as some legacy hardware does not support the 10 byte
	 * commands.  If any of the lower 5 bits in byte2 is set, we have
	 * to go with a larger command.
	 */
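	/*
	 * Illustrative limits, as encoded in the tests below: a 6 byte
	 * CDB carries a 21 bit LBA (up to 0x1fffff) and an 8 bit
	 * transfer length, a 10 byte CDB a 32 bit LBA and a 16 bit
	 * length, and a 12 byte CDB a 32 bit LBA and a 32 bit length.
	 */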
	if ((minimum_cmd_size < 10)
	 && ((lba & 0x1fffff) == lba)
	 && ((block_count & 0xff) == block_count)
	 && ((byte2 & 0xe0) == 0)) {
		/*
		 * We can fit in a 6 byte cdb.
		 */
		struct scsi_rw_6 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_6 *)&csio->cdb_io.cdb_bytes;
		scsi_cmd->opcode = readop ? READ_6 : WRITE_6;
		scsi_ulto3b(lba, scsi_cmd->addr);
		scsi_cmd->length = block_count & 0xff;
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);

		CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("6byte: %x%x%x:%d:%d\n", scsi_cmd->addr[0],
			   scsi_cmd->addr[1], scsi_cmd->addr[2],
			   scsi_cmd->length, dxfer_len));
	} else if ((minimum_cmd_size < 12)
		&& ((block_count & 0xffff) == block_count)) {
		/*
		 * Need a 10 byte cdb.
		 */
		struct scsi_rw_10 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_10 *)&csio->cdb_io.cdb_bytes;
		scsi_cmd->opcode = readop ? READ_10 : WRITE_10;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto2b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);

		CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("10byte: %x%x%x%x:%x%x: %d\n", scsi_cmd->addr[0],
			   scsi_cmd->addr[1], scsi_cmd->addr[2],
			   scsi_cmd->addr[3], scsi_cmd->length[0],
			   scsi_cmd->length[1], dxfer_len));
	} else {
		/*
		 * The block count is too big for a 10 byte CDB, use a 12
		 * byte CDB.  READ/WRITE(12) are currently only defined for
		 * optical devices.
		 */
		struct scsi_rw_12 *scsi_cmd;

		scsi_cmd = (struct scsi_rw_12 *)&csio->cdb_io.cdb_bytes;
		scsi_cmd->opcode = readop ? READ_12 : WRITE_12;
		scsi_cmd->byte2 = byte2;
		scsi_ulto4b(lba, scsi_cmd->addr);
		scsi_cmd->reserved = 0;
		scsi_ulto4b(block_count, scsi_cmd->length);
		scsi_cmd->control = 0;
		cdb_len = sizeof(*scsi_cmd);

		CAM_DEBUG(csio->ccb_h.path, CAM_DEBUG_SUBTRACE,
			  ("12byte: %x%x%x%x:%x%x%x%x: %d\n", scsi_cmd->addr[0],
			   scsi_cmd->addr[1], scsi_cmd->addr[2],
			   scsi_cmd->addr[3], scsi_cmd->length[0],
			   scsi_cmd->length[1], scsi_cmd->length[2],
			   scsi_cmd->length[3], dxfer_len));
	}
	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/readop ? CAM_DIR_IN : CAM_DIR_OUT,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      cdb_len,
		      timeout);
}
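
/*
 * Usage sketch (hypothetical caller, illustrative values): read a
 * single 512 byte block at LBA 0 into buf, letting the routine pick
 * the smallest CDB the request fits in:
 *
 *	scsi_read_write(&ccb->csio, 4, dadone, MSG_SIMPLE_Q_TAG,
 *			1, 0, 6, 0, 1, buf, 512,
 *			SSD_FULL_SIZE, DA_DEFAULT_TIMEOUT * 1000);
 */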

void
scsi_start_stop(struct ccb_scsiio *csio, u_int32_t retries,
		void (*cbfcnp)(struct cam_periph *, union ccb *),
		u_int8_t tag_action, int start, int load_eject,
		int immediate, u_int8_t sense_len, u_int32_t timeout)
{
	struct scsi_start_stop_unit *scsi_cmd;
	int extra_flags = 0;

	scsi_cmd = (struct scsi_start_stop_unit *)&csio->cdb_io.cdb_bytes;
	bzero(scsi_cmd, sizeof(*scsi_cmd));
	scsi_cmd->opcode = START_STOP_UNIT;
	if (start != 0) {
		scsi_cmd->how |= SSS_START;
		/* it takes a lot of power to start a drive */
		extra_flags |= CAM_HIGH_POWER;
	}
	if (load_eject != 0)
		scsi_cmd->how |= SSS_LOEJ;
	if (immediate != 0)
		scsi_cmd->byte2 |= SSS_IMMED;

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/CAM_DIR_NONE | extra_flags,
		      tag_action,
		      /*data_ptr*/NULL,
		      /*dxfer_len*/0,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#ifdef KERNEL

static void
dasendorderedtag(void *arg)
{
	struct da_softc *softc;
	int s;

	for (softc = SLIST_FIRST(&softc_list);
	     softc != NULL;
	     softc = SLIST_NEXT(softc, links)) {
		s = splsoftcam();
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= DA_FLAG_NEED_OTAG;
		}
		if (softc->device_stats.busy_count > 0)
			softc->flags &= ~DA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
		splx(s);
	}
	/* Queue us up again */
	timeout(dasendorderedtag, NULL,
		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
}

#endif /* KERNEL */