/*
 * Implementation of SCSI Direct Access Peripheral driver for CAM.
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef _KERNEL
#include "opt_hw_wdog.h"
#endif /* _KERNEL */

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#endif /* _KERNEL */

#include <sys/devicestat.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>

#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <cam/scsi/scsi_message.h>

#ifndef _KERNEL
#include <cam/scsi/scsi_da.h>
#endif /* !_KERNEL */

#ifdef _KERNEL
typedef enum {
	DA_STATE_PROBE,
	DA_STATE_NORMAL
} da_state;

typedef enum {
	DA_FLAG_PACK_INVALID	= 0x001,
	DA_FLAG_NEW_PACK	= 0x002,
	DA_FLAG_PACK_LOCKED	= 0x004,
	DA_FLAG_PACK_REMOVABLE	= 0x008,
	DA_FLAG_TAGGED_QUEUING	= 0x010,
	DA_FLAG_NEED_OTAG	= 0x020,
	DA_FLAG_WENT_IDLE	= 0x040,
	DA_FLAG_RETRY_UA	= 0x080,
	DA_FLAG_OPEN		= 0x100
} da_flags;

typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02
} da_quirks;

typedef enum {
	DA_CCB_PROBE		= 0x01,
	DA_CCB_BUFFER_IO	= 0x02,
	DA_CCB_WAITING		= 0x03,
	DA_CCB_DUMP		= 0x04,
	DA_CCB_TYPE_MASK	= 0x0F,
	DA_CCB_RETRY_UA		= 0x10
} da_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int16_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int32_t sectors;	/* Total number of sectors */
};

struct da_softc {
	struct	 bio_queue_head bio_queue;
	struct	 devstat device_stats;
	SLIST_ENTRY(da_softc) links;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
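	/*
	 * Per-unit state: the probe/normal state machine, runtime
	 * flags, and any quirks matched from da_quirk_table below.
	 */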
	da_state state;
	da_flags flags;
	da_quirks quirks;
	int	 minimum_cmd_size;
	int	 ordered_tag_count;
	struct	 disk_params params;
	struct	 disk disk;
	union	 ccb saved_ccb;
};

struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

static struct da_quirk_entry da_quirk_table[] =
{
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
		 * Reported by: Tom Jackson <toj@gorilla.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
		 * in NetBSD PR kern/6027, August 24, 1998.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
		 * (PR 8882).
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2112*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't work correctly with 6 byte reads/writes.
		 * Returns illegal request, and points to byte 9 of the
		 * 6-byte CDB.
		 * Reported by: Adam McDougall <bsdx@spawnet.com>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 4*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * See above.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 2*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * This USB floppy drive uses the UFI command set.  This
		 * command set is a derivative of the ATAPI command set and
		 * does not support READ_6 commands, only READ_10.  It also
		 * does not support sync cache (0x35).
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Y-E DATA", "USB-FDU", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	}
};

static	d_open_t	daopen;
static	d_close_t	daclose;
static	d_strategy_t	dastrategy;
static	d_ioctl_t	daioctl;
static	d_dump_t	dadump;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	void		dasetgeom(struct cam_periph *periph,
				  struct scsi_read_capacity_data * rdcap);
static	timeout_t	dasendorderedtag;
static	void		dashutdown(void *arg, int howto);

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.
 * Since we'd like to ensure that there is at least 1/2 of the
 * timeout length left for a starved transaction to complete after
 * we've sent an ordered tag, we must poll at least four times in
 * every timeout period.  This takes care of the worst case where
 * a starved transaction starts during an interval that passes the
 * "don't send an ordered tag" test, so it takes us two intervals
 * to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

DATA_SET(periphdriver_set, dadriver);

#define DA_CDEV_MAJOR 13
#define DA_BDEV_MAJOR 4

/* For 2.2-stable support */
#ifndef D_DISK
#define D_DISK 0
#endif

static struct cdevsw da_cdevsw = {
	/* open */	daopen,
	/* close */	daclose,
	/* read */	physread,
	/* write */	physwrite,
	/* ioctl */	daioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	dastrategy,
	/* name */	"da",
	/* maj */	DA_CDEV_MAJOR,
	/* dump */	dadump,
	/* psize */	nopsize,
	/* flags */	D_DISK,
	/* bmaj */	DA_BDEV_MAJOR
};

static struct cdevsw dadisk_cdevsw;

static SLIST_HEAD(,da_softc) softc_list;
static struct extend_array *daperiphs;

static int
daopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	struct disklabel *label;
	int unit;
	int part;
	int error;
	int s;

	unit = dkunit(dev);
	part = dkpart(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("daopen: dev=%s (unit %d, partition %d)\n", devtoname(dev),
	     unit, part));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
		return (error); /* error code from tsleep */
	}

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);
	softc->flags |= DA_FLAG_OPEN;

	s = splsoftcam();
	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		/* Invalidate our pack information. */
		disk_invalidate(&softc->disk);
		softc->flags &= ~DA_FLAG_PACK_INVALID;
	}
	splx(s);

	/* Do a read capacity */
	{
		struct scsi_read_capacity_data *rcap;
		union  ccb *ccb;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_WAITOK);

		ccb = cam_periph_getccb(periph, /*priority*/1);
		scsi_read_capacity(&ccb->csio,
				   /*retries*/1,
				   /*cbfcnp*/dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/60000);
		ccb->ccb_h.ccb_bp = NULL;

		error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0,
					  /*sense_flags*/SF_RETRY_UA |
					  SF_RETRY_SELTO,
					  &softc->device_stats);

		xpt_release_ccb(ccb);

		if (error == 0) {
			dasetgeom(periph, rcap);
		}

		free(rcap, M_TEMP);
	}

	if (error == 0) {
		struct ccb_getdev cgd;

		/* Build label for whole disk. */
		label = &softc->disk.d_label;
		bzero(label, sizeof(*label));
		label->d_type = DTYPE_SCSI;

		/*
		 * Grab the inquiry data to get the vendor and product names.
		 * Put them in the typename and packname for the label.
		 */
		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		strncpy(label->d_typename, cgd.inq_data.vendor,
			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
		strncpy(label->d_packname, cgd.inq_data.product,
			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));

		label->d_secsize = softc->params.secsize;
		label->d_nsectors = softc->params.secs_per_track;
		label->d_ntracks = softc->params.heads;
		label->d_ncylinders = softc->params.cylinders;
		label->d_secpercyl = softc->params.heads
				   * softc->params.secs_per_track;
		label->d_secperunit = softc->params.sectors;

		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
			daprevent(periph, PR_PREVENT);
		}

		/*
		 * Check to see whether or not the blocksize is set yet.
		 * If it isn't, set it and then clear the blocksize
		 * unavailable flag for the device statistics.
		 */
		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
			softc->device_stats.block_size = softc->params.secsize;
			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
		}
	}

	if (error != 0) {
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
			daprevent(periph, PR_ALLOW);
		}
	}
	cam_periph_unlock(periph);
	return (error);
}

static int
daclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct	cam_periph *periph;
	struct	da_softc *softc;
	int	unit;
	int	error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
		return (error); /* error code from tsleep */
	}

	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		union	ccb *ccb;

		ccb = cam_periph_getccb(periph, /*priority*/1);

		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
				  /*sense_flags*/SF_RETRY_UA,
				  &softc->device_stats);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR) {
				int asc, ascq;
				int sense_key, error_code;

				scsi_extract_sense(&ccb->csio.sense_data,
						   &error_code,
						   &sense_key,
						   &asc, &ascq);
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb->csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb->csio.ccb_h.status,
				       ccb->csio.scsi_status);
			}
		}

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

		xpt_release_ccb(ccb);
	}

	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
		daprevent(periph, PR_ALLOW);
		/*
		 * If we've got removable media, mark the blocksize as
		 * unavailable, since it could change when new media is
		 * inserted.
		 */
		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
	}

	softc->flags &= ~DA_FLAG_OPEN;
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int  unit;
	u_int  part;
	int    s;

	unit = dkunit(bp->bio_dev);
	part = dkpart(bp->bio_dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		bp->bio_error = ENXIO;
		goto bad;
	}
	softc = (struct da_softc *)periph->softc;
#if 0
	/*
	 * check it's not too big a transfer for our adapter
	 */
	scsi_minphys(bp, &sd_switch);
#endif

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		splx(s);
		bp->bio_error = ENXIO;
		goto bad;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioqdisksort(&softc->bio_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
bad:
	bp->bio_flags |= BIO_ERROR;

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->bio_resid = bp->bio_bcount;
	biodone(bp);
	return;
}

/* For 2.2-stable support */
#ifndef ENOIOCTL
#define ENOIOCTL -1
#endif

static int
daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
		return (error); /* error code from tsleep */
	}

	error = cam_periph_ioctl(periph, cmd, addr, daerror);

	cam_periph_unlock(periph);

	return (error);
}

static int
dadump(dev_t dev)
{
	struct	    cam_periph *periph;
	struct	    da_softc *softc;
	u_int	    unit;
	u_int	    part;
	u_int	    secsize;
	u_int	    num;	/* number of sectors to write */
	u_int	    blknum;
	long	    blkcnt;
	vm_offset_t addr;
	struct	    ccb_scsiio csio;
	int	    error;

	/* toss any characters present prior to dump */
	while (cncheckc() != -1)
		;

	unit = dkunit(dev);
	part = dkpart(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		return (ENXIO);
	}
	softc = (struct da_softc *)periph->softc;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
		return (ENXIO);

	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
	if (error)
		return (error);

	addr = 0;	/* starting address */
	blkcnt = howmany(PAGE_SIZE, secsize);

	while (num > 0) {

		if (is_physical_memory(addr)) {
			pmap_kenter((vm_offset_t)CADDR1, trunc_page(addr));
		} else {
			pmap_kenter((vm_offset_t)CADDR1, trunc_page(0));
		}
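
		/*
		 * Write the page just mapped at CADDR1 with a polled,
		 * ordered-tag WRITE; the dump path cannot rely on
		 * interrupts, so the CCB is driven to completion with
		 * xpt_polled_action() below.
		 */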
		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/1,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/FALSE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				blknum,
				blkcnt,
				/*data_ptr*/CADDR1,
				/*dxfer_len*/blkcnt * secsize,
				/*sense_len*/SSD_FULL_SIZE,
				DA_DEFAULT_TIMEOUT * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
				scsi_sense_print(&csio);
			else
				printf("status == 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			return (EIO);
		}

		if (addr % (1024 * 1024) == 0) {
#ifdef HW_WDOG
			if (wdog_tickler)
				(*wdog_tickler)();
#endif /* HW_WDOG */
			/* Count in MB of data left to write */
			printf("%d ", (num * softc->params.secsize)
				     / (1024 * 1024));
		}

		/* update block count */
		num -= blkcnt;
		blknum += blkcnt;
		addr += PAGE_SIZE;

		/* operator aborting dump? */
		if (cncheckc() != -1)
			return (EINTR);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR) {
				int asc, ascq;
				int sense_key, error_code;

				scsi_extract_sense(&csio.sense_data,
						   &error_code,
						   &sense_key,
						   &asc, &ascq);
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			}
		}
	}
	return (0);
}

static void
dainit(void)
{
	cam_status status;
	struct cam_path *path;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	daperiphs = cam_extend_new();
	SLIST_INIT(&softc_list);
	if (daperiphs == NULL) {
		printf("da: Failed to alloc extend array!\n");
		return;
	}

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE;
		csa.callback = daasync;
		csa.callback_arg = NULL;
		xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
		xpt_free_path(path);
	}

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {

		/*
		 * Schedule a periodic event to occasionally send an
		 * ordered tag to a device.
		 */
		timeout(dasendorderedtag, NULL,
			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}

static void
daoninvalidate(struct cam_periph *periph)
{
	int s;
	struct da_softc *softc;
	struct bio *q_bp;
	struct ccb_setasync csa;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	softc->flags |= DA_FLAG_PACK_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bioq_first(&softc->bio_queue)) != NULL) {
		bioq_remove(&softc->bio_queue, q_bp);
		q_bp->bio_resid = q_bp->bio_bcount;
		q_bp->bio_error = ENXIO;
		q_bp->bio_flags |= BIO_ERROR;
		biodone(q_bp);
	}
	splx(s);

	SLIST_REMOVE(&softc_list, softc, da_softc, links);

	xpt_print_path(periph->path);
	printf("lost device\n");
}

static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	devstat_remove_entry(&softc->device_stats);
	cam_extend_release(daperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	free(softc, M_DEVBUF);
}

static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;

		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		 && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
			       "due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct da_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct da_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		splx(s);
		/* FALLTHROUGH */
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	int s;
	struct da_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("daregister: periph was NULL!!\n");
		return (CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return (CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE;
	bioq_init(&softc->bio_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
		softc->flags |= DA_FLAG_TAGGED_QUEUING;

	periph->softc = softc;

	cam_extend_set(daperiphs, periph->unit_number, periph);

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	if (softc->quirks & DA_Q_NO_6_BYTE)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Block our timeout handler while we
	 * add this softc to the dev list.
	 */
	s = splsoftclock();
	SLIST_INSERT_HEAD(&softc_list, softc, links);
	splx(s);

	/*
	 * The DA driver supports a blocksize, but
	 * we don't know the blocksize until we do
	 * a read capacity.  So, set a flag to
	 * indicate that the blocksize is
	 * unavailable right now.  We'll clear the
	 * flag as soon as we've done a read capacity.
	 */
	devstat_add_entry(&softc->device_stats, "da",
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
			  DEVSTAT_PRIORITY_DISK);

	/*
	 * Register this media as a disk
	 */
	disk_create(periph->unit_number, &softc->disk, 0,
		    &da_cdevsw, &dadisk_cdevsw);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
	/*
	 * Lock this peripheral until we are setup.
	 * This first call can't block.
	 */
	(void)cam_periph_lock(periph, PRIBIO);
	xpt_schedule(periph, /*priority*/5);

	return (CAM_REQ_CMP);
}

static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct bio *bp;
		int s;

		/*
		 * See if there is a buf with work for us to do..
		 */
		s = splbio();
		bp = bioq_first(&softc->bio_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			splx(s);
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			splx(s);
			xpt_release_ccb(start_ccb);
		} else {
			int oldspl;
			u_int8_t tag_code;

			bioq_remove(&softc->bio_queue, bp);

			devstat_start_transaction(&softc->device_stats);

			if ((bp->bio_flags & BIO_ORDERED) != 0
			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~DA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = MSG_ORDERED_Q_TAG;
			} else {
				tag_code = MSG_SIMPLE_Q_TAG;
			}
			scsi_read_write(&start_ccb->csio,
					/*retries*/4,
					dadone,
					tag_code,
					bp->bio_cmd == BIO_READ,
					/*byte2*/0,
					softc->minimum_cmd_size,
					bp->bio_pblkno,
					bp->bio_bcount / softc->params.secsize,
					bp->bio_data,
					bp->bio_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					DA_DEFAULT_TIMEOUT * 1000);
			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;

			/*
			 * Block out any asynchronous callbacks
			 * while we touch the pending ccb list.
			 */
			oldspl = splcam();
			LIST_INSERT_HEAD(&softc->pending_ccbs,
					 &start_ccb->ccb_h, periph_links.le);
			splx(oldspl);

			/* We expect a unit attention from this device */
			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
				softc->flags &= ~DA_FLAG_RETRY_UA;
			}

			start_ccb->ccb_h.ccb_bp = bp;
			bp = bioq_first(&softc->bio_queue);
			splx(s);

			xpt_action(start_ccb);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
		break;
	}
	case DA_STATE_PROBE:
	{
		struct ccb_scsiio *csio;
		struct scsi_read_capacity_data *rcap;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_NOWAIT);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity(csio,
				   /*retries*/4,
				   dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
		xpt_action(start_ccb);
		break;
	}
	}
}

static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
	case DA_CCB_BUFFER_IO:
	{
		struct bio *bp;
		int oldspl;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			/* Retry selection timeouts */
			sf |= SF_RETRY_SELTO;

			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct bio *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/* XXX See if this is really a media
					 * change first.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
				}

				/*
				 * Return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bioq_first(&softc->bio_queue))
					!= NULL) {
					bioq_remove(&softc->bio_queue, q_bp);
					q_bp->bio_resid = q_bp->bio_bcount;
					q_bp->bio_error = EIO;
					q_bp->bio_flags |= BIO_ERROR;
					biodone(q_bp);
				}
				splx(s);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = csio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0) {
					/* Short transfer ??? */
					bp->bio_flags |= BIO_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			bp->bio_resid = csio->resid;
			if (csio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		if (softc->device_stats.busy_count == 0)
			softc->flags |= DA_FLAG_WENT_IDLE;

		devstat_end_transaction_bio(&softc->device_stats, bp);
		biodone(bp);
		break;
	}
	case DA_CCB_PROBE:
	{
		struct	   scsi_read_capacity_data *rdcap;
		char	   announce_buf[80];

		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct disk_params *dp;

			dasetgeom(periph, rdcap);
			dp = &softc->params;
			snprintf(announce_buf, sizeof(announce_buf),
				 "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
				 (unsigned long) (((u_int64_t)dp->secsize *
				 dp->sectors) / (1024*1024)), dp->sectors,
				 dp->secsize, dp->heads, dp->secs_per_track,
				 dp->cylinders);
		} else {
			int error;

			announce_buf[0] = '\0';

			/*
			 * Retry any UNIT ATTENTION type errors.  They
			 * are expected at boot.
			 */
			error = daerror(done_ccb, 0, SF_RETRY_UA |
					SF_RETRY_SELTO | SF_NO_PRINT);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			} else if (error != 0) {
				struct scsi_sense_data *sense;
				int asc, ascq;
				int sense_key, error_code;
				int have_sense;
				cam_status status;
				struct ccb_getdev cgd;

				/* Don't wedge this device's queue */
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);

				status = done_ccb->ccb_h.status;

				xpt_setup_ccb(&cgd.ccb_h,
					      done_ccb->ccb_h.path,
					      /* priority */ 1);
				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action((union ccb *)&cgd);

				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
				 || ((status & CAM_AUTOSNS_VALID) == 0))
					have_sense = FALSE;
				else
					have_sense = TRUE;

				if (have_sense) {
					sense = &csio->sense_data;
					scsi_extract_sense(sense, &error_code,
							   &sense_key,
							   &asc, &ascq);
				}
				/*
				 * Attach to anything that claims to be a
				 * direct access or optical disk device,
				 * as long as it doesn't return a "Logical
				 * unit not supported" (0x25) error.
				 */
				if ((have_sense) && (asc != 0x25)
				 && (error_code == SSD_CURRENT_ERROR))
					snprintf(announce_buf,
						 sizeof(announce_buf),
						 "Attempt to query device "
						 "size failed: %s, %s",
						 scsi_sense_key_text[sense_key],
						 scsi_sense_desc(asc, ascq,
								 &cgd.inq_data));
				else {
					if (have_sense)
						scsi_sense_print(
							&done_ccb->csio);
					else {
						xpt_print_path(periph->path);
						printf("got CAM status %#x\n",
						       done_ccb->ccb_h.status);
					}

					xpt_print_path(periph->path);
					printf("fatal error, failed"
					       " to attach to device\n");

					/*
					 * Free up resources.
					 */
					cam_periph_invalidate(periph);
				}
			}
		}
		free(rdcap, M_TEMP);
		if (announce_buf[0] != '\0')
			xpt_announce_periph(periph, announce_buf);
		softc->state = DA_STATE_NORMAL;
		/*
		 * Since our peripheral may be invalidated by an error
		 * above or an external event, we must release our CCB
		 * before releasing the probe lock on the peripheral.
		 * The peripheral will only go away once the last lock
		 * is removed, and we need it around for the CCB release
		 * operation.
		 */
		xpt_release_ccb(done_ccb);
		cam_periph_unlock(periph);
		return;
	}
	case DA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case DA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;
	return (cam_periph_error(ccb, cam_flags, sense_flags,
				 &softc->saved_ccb));
}

static void
daprevent(struct cam_periph *periph, int action)
{
	struct	da_softc *softc;
	union	ccb *ccb;
	int	error;

	softc = (struct da_softc *)periph->softc;

	if (((action == PR_ALLOW)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
	 || ((action == PR_PREVENT)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
		return;
	}

	ccb = cam_periph_getccb(periph, /*priority*/1);

	scsi_prevent(&ccb->csio,
		     /*retries*/1,
		     /*cbfcnp*/dadone,
		     MSG_SIMPLE_Q_TAG,
		     action,
		     SSD_FULL_SIZE,
		     5000);

	error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
				  /*sense_flags*/0, &softc->device_stats);

	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~DA_FLAG_PACK_LOCKED;
		else
			softc->flags |= DA_FLAG_PACK_LOCKED;
	}

	xpt_release_ccb(ccb);
}

static void
dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = scsi_4btoul(rdcap->length);
	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb *)&ccg);
	dp->heads = ccg.heads;
	dp->secs_per_track = ccg.secs_per_track;
	dp->cylinders = ccg.cylinders;
}

static void
dasendorderedtag(void *arg)
{
	struct da_softc *softc;
	int s;

	for (softc = SLIST_FIRST(&softc_list);
	     softc != NULL;
	     softc = SLIST_NEXT(softc, links)) {
		s = splsoftcam();
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= DA_FLAG_NEED_OTAG;
		}
		if (softc->device_stats.busy_count > 0)
			softc->flags &= ~DA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
		splx(s);
	}
	/* Queue us up again */
	timeout(dasendorderedtag, NULL,
		(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);
}

/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL;
	     periph = TAILQ_NEXT(periph, unit_links)) {
		union ccb ccb;

		softc = (struct da_softc *)periph->softc;

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it.
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
			continue;

		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);

		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&ccb.csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
			      CAM_SCSI_STATUS_ERROR)
			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)) {
				int error_code, sense_key, asc, ascq;

				scsi_extract_sense(&ccb.csio.sense_data,
						   &error_code, &sense_key,
						   &asc, &ascq);

				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb.csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb.ccb_h.status, ccb.csio.scsi_status);
			}
		}

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
}

#else /* !_KERNEL */

/*
 * XXX This is only left out of the kernel build to silence warnings.  If,
 * for some reason this function is used in the kernel, the ifdefs should
 * be moved so it is included both in the kernel and userland.
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */