/*
 * Implementation of SCSI Direct Access Peripheral driver for CAM.
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef _KERNEL
#include "opt_hw_wdog.h"
#endif /* _KERNEL */

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#endif /* _KERNEL */

#include <sys/devicestat.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>

#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <cam/scsi/scsi_message.h>

#ifndef _KERNEL
#include <cam/scsi/scsi_da.h>
#endif /* !_KERNEL */

#ifdef _KERNEL
typedef enum {
	DA_STATE_PROBE,
	DA_STATE_NORMAL
} da_state;

typedef enum {
	DA_FLAG_PACK_INVALID	= 0x001,
	DA_FLAG_NEW_PACK	= 0x002,
	DA_FLAG_PACK_LOCKED	= 0x004,
	DA_FLAG_PACK_REMOVABLE	= 0x008,
	DA_FLAG_TAGGED_QUEUING	= 0x010,
	DA_FLAG_NEED_OTAG	= 0x020,
	DA_FLAG_WENT_IDLE	= 0x040,
	DA_FLAG_RETRY_UA	= 0x080,
	DA_FLAG_OPEN		= 0x100
} da_flags;

typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02
} da_quirks;

typedef enum {
	DA_CCB_PROBE		= 0x01,
	DA_CCB_BUFFER_IO	= 0x02,
	DA_CCB_WAITING		= 0x03,
	DA_CCB_DUMP		= 0x04,
	DA_CCB_TYPE_MASK	= 0x0F,
	DA_CCB_RETRY_UA		= 0x10
} da_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int16_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int32_t sectors;	/* total number sectors */
};

struct da_softc {
	struct bio_queue_head bio_queue;
	struct devstat device_stats;
	SLIST_ENTRY(da_softc) links;
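	/*
	 * CCBs issued to the device but not yet completed.  dastart()
	 * links new I/O CCBs here, dadone() removes them, and daasync()
	 * walks the list after a bus reset or BDR to flag each one for
	 * a unit attention retry.
	 */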
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	da_state state;
	da_flags flags;
	da_quirks quirks;
	int	 minimum_cmd_size;
	int	 ordered_tag_count;
	struct	 disk_params params;
	struct	 disk disk;
	union	 ccb saved_ccb;
	dev_t	 dev;
};

struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";

static struct da_quirk_entry da_quirk_table[] =
{
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
		 * Reported by: Tom Jackson <toj@gorilla.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE

	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
		 * in NetBSD PR kern/6027, August 24, 1998.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
		 * (PR 8882).
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't work correctly with 6 byte reads/writes.
		 * Returns illegal request, and points to byte 9 of the
		 * 6-byte CDB.
		 * Reported by: Adam McDougall <bsdx@spawnet.com>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * See above.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},

	/* Below is a list of quirks for USB devices supported by umass. */
	{
		/*
		 * This USB floppy drive uses the UFI command set.  This
		 * command set is a derivative of the ATAPI command set and
		 * does not support READ_6 commands, only READ_10.  It also
		 * does not support sync cache (0x35).
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Y-E DATA", "USB-FDU", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/* Another USB floppy */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "MATSHITA", "FDD CF-VFDU*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Sony Memory Stick adapter MSAC-US1 and
		 * Sony PCG-C1VJ Internal Memory Stick Slot (MSC-U01).
		 * Make all Sony MS* products use this quirk.
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "MS*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Sony DSC cameras (DSC-S30, DSC-S50, DSC-S70)
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "MCF3064AP", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * Microtech USB CameraMate
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "eUSB Compact*", "Compact Flash*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * The vendor, product and version strings coming from the
		 * controller are null terminated instead of being padded with
		 * spaces.  The trailing wildcard character '*' is required.
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SMSC*", "USB FDC*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1)
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C-*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * KingByte Pen Drives
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "NO BRAND", "PEN DRIVE", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * FujiFilm Camera
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJIFILMUSB-DRIVEUNIT", "USB-DRIVEUNIT", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	}
};

static	d_open_t	daopen;
static	d_close_t	daclose;
static	d_strategy_t	dastrategy;
static	d_ioctl_t	daioctl;
static	d_dump_t	dadump;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	void		dasetgeom(struct cam_periph *periph,
				  struct scsi_read_capacity_data *rdcap);
static	timeout_t	dasendorderedtag;
static	void		dashutdown(void *arg, int howto);

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

#ifndef DA_DEFAULT_RETRY
#define DA_DEFAULT_RETRY 4
#endif

static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;

SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem");
SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
	    "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
	   &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
	   &da_default_timeout, 0, "Normal I/O timeout (in seconds)");

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.
 * This takes care of the worst case, where a starved transaction
 * starts during an interval that still passes the "don't send an
 * ordered tag" test, so it takes us two intervals to determine that
 * a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

#define DA_CDEV_MAJOR 13

/* For 2.2-stable support */
#ifndef D_DISK
#define D_DISK 0
#endif

static struct cdevsw da_cdevsw = {
	/* open */	daopen,
	/* close */	daclose,
	/* read */	physread,
	/* write */	physwrite,
	/* ioctl */	daioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	dastrategy,
	/* name */	"da",
	/* maj */	DA_CDEV_MAJOR,
	/* dump */	dadump,
	/* psize */	nopsize,
	/* flags */	D_DISK,
};

static struct cdevsw dadisk_cdevsw;

static SLIST_HEAD(,da_softc) softc_list;
static struct extend_array *daperiphs;

static int
daopen(dev_t dev, int flags, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	struct disklabel *label;
	struct scsi_read_capacity_data *rcap;
	union ccb *ccb;
	int unit;
	int part;
	int error;
	int s;

	unit = dkunit(dev);
	part = dkpart(dev);
	s = splsoftcam();
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		splx(s);
		return (ENXIO);
	}

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("daopen: dev=%s (unit %d, partition %d)\n", devtoname(dev),
	     unit, part));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0)
		return (error); /* error code from tsleep */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return (ENXIO);
	softc->flags |= DA_FLAG_OPEN;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		/* Invalidate our pack information. */
		disk_invalidate(&softc->disk);
		softc->flags &= ~DA_FLAG_PACK_INVALID;
	}
	splx(s);

	/* Do a read capacity */
	rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
							M_TEMP,
							M_WAITOK);

	ccb = cam_periph_getccb(periph, /*priority*/1);
	scsi_read_capacity(&ccb->csio,
			   /*retries*/4,
			   /*cbfncp*/dadone,
			   MSG_SIMPLE_Q_TAG,
			   rcap,
			   SSD_FULL_SIZE,
			   /*timeout*/60000);
	ccb->ccb_h.ccb_bp = NULL;

	error = cam_periph_runccb(ccb, daerror,
				  /*cam_flags*/CAM_RETRY_SELTO,
				  /*sense_flags*/SF_RETRY_UA,
				  &softc->device_stats);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);
	xpt_release_ccb(ccb);

	if (error == 0)
		dasetgeom(periph, rcap);

	free(rcap, M_TEMP);

	if (error == 0) {
		struct ccb_getdev cgd;

		/* Build label for whole disk. */
		label = &softc->disk.d_label;
		bzero(label, sizeof(*label));
		label->d_type = DTYPE_SCSI;

		/*
		 * Grab the inquiry data to get the vendor and product names.
		 * Put them in the typename and packname for the label.
		 */
		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		strncpy(label->d_typename, cgd.inq_data.vendor,
			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
		strncpy(label->d_packname, cgd.inq_data.product,
			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));

		label->d_secsize = softc->params.secsize;
		label->d_nsectors = softc->params.secs_per_track;
		label->d_ntracks = softc->params.heads;
		label->d_ncylinders = softc->params.cylinders;
		label->d_secpercyl = softc->params.heads
				   * softc->params.secs_per_track;
		label->d_secperunit = softc->params.sectors;

		/*
		 * Check to see whether or not the blocksize is set yet.
		 * If it isn't, set it and then clear the blocksize
		 * unavailable flag for the device statistics.
		 */
		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
			softc->device_stats.block_size = softc->params.secsize;
			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
		}
	}

	if (error == 0) {
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
			daprevent(periph, PR_PREVENT);
	}
	cam_periph_unlock(periph);
	return (error);
}

static int
daclose(dev_t dev, int flag, int fmt, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
		return (error); /* error code from tsleep */
	}

	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		union ccb *ccb;

		ccb = cam_periph_getccb(periph, /*priority*/1);

		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
				  /*sense_flags*/SF_RETRY_UA,
				  &softc->device_stats);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR) {
				int asc, ascq;
				int sense_key, error_code;

				scsi_extract_sense(&ccb->csio.sense_data,
						   &error_code,
						   &sense_key,
						   &asc, &ascq);
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb->csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb->csio.ccb_h.status,
				       ccb->csio.scsi_status);
			}
		}

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

		xpt_release_ccb(ccb);

	}

	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
		daprevent(periph, PR_ALLOW);
		/*
		 * If we've got removable media, mark the blocksize as
		 * unavailable, since it could change when new media is
		 * inserted.
		 */
		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
	}

	softc->flags &= ~DA_FLAG_OPEN;
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.
 * The transfer is described by a bio and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int unit;
	u_int part;
	int s;

	unit = dkunit(bp->bio_dev);
	part = dkpart(bp->bio_dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct da_softc *)periph->softc;
#if 0
	/*
	 * check it's not too big a transfer for our adapter
	 */
	scsi_minphys(bp, &sd_switch);
#endif

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		splx(s);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioqdisksort(&softc->bio_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
}

/* For 2.2-stable support */
#ifndef ENOIOCTL
#define ENOIOCTL -1
#endif

static int
daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
		return (error); /* error code from tsleep */
	}

	error = cam_periph_ioctl(periph, cmd, addr, daerror);

	cam_periph_unlock(periph);

	return (error);
}

static int
dadump(dev_t dev)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int unit;
	u_int part;
	u_int secsize;
	u_int num;		/* number of sectors to write */
	u_int blknum;
	long blkcnt;
	vm_offset_t addr;
	struct ccb_scsiio csio;
	int dumppages = MAXDUMPPGS;
	int error;
	int i;

	/* toss any characters present prior to dump */
	while (cncheckc() != -1)
		;

	unit = dkunit(dev);
	part = dkpart(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		return (ENXIO);
	}
	softc = (struct da_softc *)periph->softc;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
		return (ENXIO);

	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
	if (error)
		return (error);

	addr = 0;	/* starting address */
	blkcnt = howmany(PAGE_SIZE, secsize);

	while (num > 0) {
		caddr_t va = NULL;

		if ((num / blkcnt) < dumppages)
			dumppages = num / blkcnt;

		for (i = 0; i < dumppages; ++i) {
			vm_offset_t a = addr + (i * PAGE_SIZE);
			if (is_physical_memory(a))
				va = pmap_kenter_temporary(trunc_page(a), i);
			else
				va = pmap_kenter_temporary(trunc_page(0), i);
		}

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/1,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/FALSE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				blknum,
				blkcnt * dumppages,
				/*data_ptr*/(u_int8_t *) va,
				/*dxfer_len*/blkcnt * secsize * dumppages,
				/*sense_len*/SSD_FULL_SIZE,
				DA_DEFAULT_TIMEOUT * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
				scsi_sense_print(&csio);
			else
				printf("status == 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			return (EIO);
		}

		if (dumpstatus(addr, (long)(num * softc->params.secsize)) < 0)
			return (EINTR);

		/* update block count */
		num -= blkcnt * dumppages;
		blknum += blkcnt * dumppages;
		addr += PAGE_SIZE * dumppages;
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR) {
				int asc, ascq;
				int sense_key, error_code;

				scsi_extract_sense(&csio.sense_data,
						   &error_code,
						   &sense_key,
						   &asc, &ascq);
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			}
		}
	}
	return (0);
}

static void
dainit(void)
{
	cam_status status;
	struct cam_path *path;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	daperiphs = cam_extend_new();
	SLIST_INIT(&softc_list);
	if (daperiphs == NULL) {
		printf("da: Failed to alloc extend array!\n");
		return;
	}

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE;
		csa.callback = daasync;
		csa.callback_arg = NULL;
		xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
		xpt_free_path(path);
	}

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {

		/*
		 * Schedule a periodic event to occasionally send an
		 * ordered tag to a device.
		 */
		timeout(dasendorderedtag, NULL,
			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}

static void
daoninvalidate(struct cam_periph *periph)
{
	int s;
	struct da_softc *softc;
	struct bio *q_bp;
	struct ccb_setasync csa;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	softc->flags |= DA_FLAG_PACK_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){
		bioq_remove(&softc->bio_queue, q_bp);
		q_bp->bio_resid = q_bp->bio_bcount;
		biofinish(q_bp, NULL, ENXIO);
	}
	splx(s);

	SLIST_REMOVE(&softc_list, softc, da_softc, links);

	xpt_print_path(periph->path);
	printf("lost device\n");
}

static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	devstat_remove_entry(&softc->device_stats);
	cam_extend_release(daperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	if (softc->dev) {
		disk_destroy(softc->dev);
	}
	free(softc, M_DEVBUF);
}

static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		 && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
			       "due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct da_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct da_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		splx(s);
		/* FALLTHROUGH */
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	int s;
	struct da_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("daregister: periph was NULL!!\n");
		return (CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return (CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF, M_NOWAIT);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return (CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE;
	bioq_init(&softc->bio_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
		softc->flags |= DA_FLAG_TAGGED_QUEUING;

	periph->softc = softc;

	cam_extend_set(daperiphs, periph->unit_number, periph);

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	if (softc->quirks & DA_Q_NO_6_BYTE)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Block our timeout handler while we
	 * add this softc to the dev list.
	 */
	s = splsoftclock();
	SLIST_INSERT_HEAD(&softc_list, softc, links);
	splx(s);

	/*
	 * The DA driver supports a blocksize, but
	 * we don't know the blocksize until we do
	 * a read capacity.  So, set a flag to
	 * indicate that the blocksize is
	 * unavailable right now.  We'll clear the
	 * flag as soon as we've done a read capacity.
	 */
	devstat_add_entry(&softc->device_stats, "da",
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
			  DEVSTAT_PRIORITY_DISK);

	/*
	 * Register this media as a disk
	 */
	softc->dev = disk_create(periph->unit_number, &softc->disk, 0,
				 &da_cdevsw, &dadisk_cdevsw);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
	/*
	 * Lock this peripheral until we are set up.
	 * This first call can't block.
	 */
	(void)cam_periph_lock(periph, PRIBIO);
	xpt_schedule(periph, /*priority*/5);

	return (CAM_REQ_CMP);
}

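/*
 * dastart() is this peripheral's action routine, called by the XPT once a
 * CCB becomes available after an xpt_schedule() request.  In DA_STATE_PROBE
 * it issues the initial READ CAPACITY; in DA_STATE_NORMAL it dequeues the
 * next bio and translates it into a tagged READ/WRITE CCB.
 */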
static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;


	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct bio *bp;
		int s;

		/*
		 * See if there is a buf with work for us to do.
		 */
		s = splbio();
		bp = bioq_first(&softc->bio_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			splx(s);
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			splx(s);
			xpt_release_ccb(start_ccb);
		} else {
			int oldspl;
			u_int8_t tag_code;

			bioq_remove(&softc->bio_queue, bp);

			devstat_start_transaction(&softc->device_stats);

			if ((bp->bio_flags & BIO_ORDERED) != 0
			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~DA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = MSG_ORDERED_Q_TAG;
			} else {
				tag_code = MSG_SIMPLE_Q_TAG;
			}
			scsi_read_write(&start_ccb->csio,
					/*retries*/da_retry_count,
					dadone,
					tag_code,
					bp->bio_cmd == BIO_READ,
					/*byte2*/0,
					softc->minimum_cmd_size,
					bp->bio_pblkno,
					bp->bio_bcount / softc->params.secsize,
					bp->bio_data,
					bp->bio_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					da_default_timeout * 1000);
			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;

			/*
			 * Block out any asynchronous callbacks
			 * while we touch the pending ccb list.
			 */
			oldspl = splcam();
			LIST_INSERT_HEAD(&softc->pending_ccbs,
					 &start_ccb->ccb_h, periph_links.le);
			splx(oldspl);

			/* We expect a unit attention from this device */
			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
				softc->flags &= ~DA_FLAG_RETRY_UA;
			}

			start_ccb->ccb_h.ccb_bp = bp;
			bp = bioq_first(&softc->bio_queue);
			splx(s);

			xpt_action(start_ccb);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
		break;
	}
	case DA_STATE_PROBE:
	{
		struct ccb_scsiio *csio;
		struct scsi_read_capacity_data *rcap;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_NOWAIT);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity(csio,
				   /*retries*/4,
				   dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
		xpt_action(start_ccb);
		break;
	}
	}
}


static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
	case DA_CCB_BUFFER_IO:
	{
		struct bio *bp;
		int oldspl;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct bio *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/* XXX See if this is really a media
					 * change first.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
				}

				/*
				 * Return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bioq_first(&softc->bio_queue))
					!= NULL) {
					bioq_remove(&softc->bio_queue, q_bp);
					q_bp->bio_resid = q_bp->bio_bcount;
					biofinish(q_bp, NULL, EIO);
				}
				splx(s);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = csio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0) {
					/* Short transfer ??? */
					bp->bio_flags |= BIO_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = csio->resid;
			if (csio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		if (softc->device_stats.busy_count == 0)
			softc->flags |= DA_FLAG_WENT_IDLE;

		biofinish(bp, &softc->device_stats, 0);
		break;
	}
	case DA_CCB_PROBE:
	{
		struct scsi_read_capacity_data *rdcap;
		char announce_buf[80];

		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct disk_params *dp;

			dasetgeom(periph, rdcap);
			dp = &softc->params;
			snprintf(announce_buf, sizeof(announce_buf),
				 "%luMB (%u %u byte sectors: %dH %dS/T %dC)",
				 (unsigned long) (((u_int64_t)dp->secsize *
				 dp->sectors) / (1024*1024)), dp->sectors,
				 dp->secsize, dp->heads, dp->secs_per_track,
				 dp->cylinders);
		} else {
			int error;

			announce_buf[0] = '\0';

			/*
			 * Retry any UNIT ATTENTION type errors.  They
			 * are expected at boot.
			 */
			error = daerror(done_ccb, CAM_RETRY_SELTO,
					SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			} else if (error != 0) {
				struct scsi_sense_data *sense;
				int asc, ascq;
				int sense_key, error_code;
				int have_sense;
				cam_status status;
				struct ccb_getdev cgd;

				/* Don't wedge this device's queue */
				status = done_ccb->ccb_h.status;
				if ((status & CAM_DEV_QFRZN) != 0)
					cam_release_devq(done_ccb->ccb_h.path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);


				xpt_setup_ccb(&cgd.ccb_h,
					      done_ccb->ccb_h.path,
					      /* priority */ 1);
				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action((union ccb *)&cgd);

				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
				 || ((status & CAM_AUTOSNS_VALID) == 0))
					have_sense = FALSE;
				else
					have_sense = TRUE;

				if (have_sense) {
					sense = &csio->sense_data;
					scsi_extract_sense(sense, &error_code,
							   &sense_key,
							   &asc, &ascq);
				}
				/*
				 * Attach to anything that claims to be a
				 * direct access or optical disk device,
				 * as long as it doesn't return a "Logical
				 * unit not supported" (0x25) error.
				 */
				if ((have_sense) && (asc != 0x25)
				 && (error_code == SSD_CURRENT_ERROR)) {
					const char *sense_key_desc;
					const char *asc_desc;

					scsi_sense_desc(sense_key, asc, ascq,
							&cgd.inq_data,
							&sense_key_desc,
							&asc_desc);
					snprintf(announce_buf,
						 sizeof(announce_buf),
						 "Attempt to query device "
						 "size failed: %s, %s",
						 sense_key_desc,
						 asc_desc);
				} else {
					if (have_sense)
						scsi_sense_print(
							&done_ccb->csio);
					else {
						xpt_print_path(periph->path);
						printf("got CAM status %#x\n",
						       done_ccb->ccb_h.status);
					}

					xpt_print_path(periph->path);
					printf("fatal error, failed"
					       " to attach to device\n");

					/*
					 * Free up resources.
					 */
					cam_periph_invalidate(periph);
				}
			}
		}
		free(rdcap, M_TEMP);
		if (announce_buf[0] != '\0')
			xpt_announce_periph(periph, announce_buf);
		softc->state = DA_STATE_NORMAL;
		/*
		 * Since our peripheral may be invalidated by an error
		 * above or an external event, we must release our CCB
		 * before releasing the probe lock on the peripheral.
		 * The peripheral will only go away once the last lock
		 * is removed, and we need it around for the CCB release
		 * operation.
		 */
		xpt_release_ccb(done_ccb);
		cam_periph_unlock(periph);
		return;
	}
	case DA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case DA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;
	return (cam_periph_error(ccb, cam_flags, sense_flags,
				 &softc->saved_ccb));
}

static void
daprevent(struct cam_periph *periph, int action)
{
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	softc = (struct da_softc *)periph->softc;

	if (((action == PR_ALLOW)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
	 || ((action == PR_PREVENT)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
		return;
	}

	ccb = cam_periph_getccb(periph, /*priority*/1);

	scsi_prevent(&ccb->csio,
		     /*retries*/1,
		     /*cbcfp*/dadone,
		     MSG_SIMPLE_Q_TAG,
		     action,
		     SSD_FULL_SIZE,
		     5000);

	error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
				  SF_RETRY_UA, &softc->device_stats);

	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~DA_FLAG_PACK_LOCKED;
		else
			softc->flags |= DA_FLAG_PACK_LOCKED;
	}

	xpt_release_ccb(ccb);
}

static void
dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data *rdcap)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = scsi_4btoul(rdcap->length);
	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb *)&ccg);
	dp->heads = ccg.heads;
	dp->secs_per_track = ccg.secs_per_track;
	dp->cylinders = ccg.cylinders;
}

static void
dasendorderedtag(void *arg)
{
	struct da_softc *softc;
	int s;

	for (softc = SLIST_FIRST(&softc_list);
	     softc != NULL;
	     softc = SLIST_NEXT(softc, links)) {
		s = splsoftcam();
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= DA_FLAG_NEED_OTAG;
		}
		if (softc->device_stats.busy_count > 0)
			softc->flags &= ~DA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
		splx(s);
	}
	/* Queue us up again */
	timeout(dasendorderedtag, NULL,
		(da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL);
}

/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void *arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
		union ccb ccb;
		softc = (struct da_softc *)periph->softc;

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it.
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
			continue;

		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);

		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&ccb.csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
				int error_code, sense_key, asc, ascq;

				scsi_extract_sense(&ccb.csio.sense_data,
						   &error_code, &sense_key,
						   &asc, &ascq);

				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb.csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb.ccb_h.status, ccb.csio.scsi_status);
			}
		}

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

	}
}

#else /* !_KERNEL */

/*
 * XXX This is only left out of the kernel build to silence warnings.  If,
 * for some reason this function is used in the kernel, the ifdefs should
 * be moved so it is included both in the kernel and userland.
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */