/*
 * Implementation of SCSI Direct Access Peripheral driver for CAM.
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifdef _KERNEL
#include "opt_hw_wdog.h"
#endif /* _KERNEL */

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#endif /* _KERNEL */

#include <sys/devicestat.h>
#include <sys/conf.h>
#include <sys/disk.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>

#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <cam/scsi/scsi_message.h>

#ifndef _KERNEL
#include <cam/scsi/scsi_da.h>
#endif /* !_KERNEL */

#ifdef _KERNEL
typedef enum {
	DA_STATE_PROBE,
	DA_STATE_NORMAL
} da_state;

typedef enum {
	DA_FLAG_PACK_INVALID	= 0x001,
	DA_FLAG_NEW_PACK	= 0x002,
	DA_FLAG_PACK_LOCKED	= 0x004,
	DA_FLAG_PACK_REMOVABLE	= 0x008,
	DA_FLAG_TAGGED_QUEUING	= 0x010,
	DA_FLAG_NEED_OTAG	= 0x020,
	DA_FLAG_WENT_IDLE	= 0x040,
	DA_FLAG_RETRY_UA	= 0x080,
	DA_FLAG_OPEN		= 0x100
} da_flags;

typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02
} da_quirks;

typedef enum {
	DA_CCB_PROBE		= 0x01,
	DA_CCB_BUFFER_IO	= 0x02,
	DA_CCB_WAITING		= 0x03,
	DA_CCB_DUMP		= 0x04,
	DA_CCB_TYPE_MASK	= 0x0F,
	DA_CCB_RETRY_UA		= 0x10
} da_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int16_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int32_t sectors;	/* total number sectors */
};

struct da_softc {
	struct bio_queue_head bio_queue;
	struct devstat device_stats;
	SLIST_ENTRY(da_softc) links;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	da_state state;
	da_flags flags;
	da_quirks quirks;
	int minimum_cmd_size;
	int ordered_tag_count;
	struct disk_params params;
	struct disk disk;
	union ccb saved_ccb;
	dev_t dev;
};

struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";

static struct da_quirk_entry da_quirk_table[] =
{
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
		 * Reported by: Tom Jackson <toj@gorilla.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
		 * in NetBSD PR kern/6027, August 24, 1998.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
		 * (PR 8882).
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't work correctly with 6 byte reads/writes.
		 * Returns illegal request, and points to byte 9 of the
		 * 6-byte CDB.
		 * Reported by: Adam McDougall <bsdx@spawnet.com>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * See above.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},

	/* Below is a list of quirks for USB devices supported by umass. */
	{
		/*
		 * This USB floppy drive uses the UFI command set.  This
		 * command set is a derivative of the ATAPI command set and
		 * does not support READ_6 commands, only READ_10.  It also
		 * does not support sync cache (0x35).
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Y-E DATA", "USB-FDU", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/* Another USB floppy */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "MATSHITA", "FDD CF-VFDU*","*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Sony Memory Stick adapter MSAC-US1 and
		 * Sony PCG-C1VJ Internal Memory Stick Slot (MSC-U01).
		 * Make all Sony MS* products use this quirk.
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "MS*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Sony Memory Stick adapter for the CLIE series
		 * of PalmOS PDAs.
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "CLIE*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Sony DSC cameras (DSC-S30, DSC-S50, DSC-S70)
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Maxtor 3000LE USB Drive
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "MAXTOR*", "K040H2*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "MCF3064AP", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * Microtech USB CameraMate
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "eUSB Compact*", "Compact Flash*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * The vendor, product and version strings coming from the
		 * controller are null terminated instead of being padded with
		 * spaces.  The trailing wildcard character '*' is required.
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SMSC*", "USB FDC*","*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1)
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C-*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * KingByte Pen Drives
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "NO BRAND", "PEN DRIVE", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * FujiFilm Camera
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJIFILMUSB-DRIVEUNIT", "USB-DRIVEUNIT", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE
	}
};

static	d_open_t	daopen;
static	d_close_t	daclose;
static	d_strategy_t	dastrategy;
static	d_ioctl_t	daioctl;
static	d_dump_t	dadump;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	void		dasetgeom(struct cam_periph *periph,
				  struct scsi_read_capacity_data * rdcap);
static	timeout_t	dasendorderedtag;
static	void		dashutdown(void *arg, int howto);

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

#ifndef DA_DEFAULT_RETRY
#define DA_DEFAULT_RETRY 4
#endif

static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;

SYSCTL_NODE(_kern, OID_AUTO, cam, CTLFLAG_RD, 0, "CAM Subsystem");
SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
	    "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
	   &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
	   &da_default_timeout, 0, "Normal I/O timeout (in seconds)");

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * satisfies the "don't send an ordered tag" test, so it takes us
 * two intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

#define DA_CDEV_MAJOR 13

/* For 2.2-stable support */
#ifndef D_DISK
#define D_DISK 0
#endif

static struct cdevsw da_cdevsw = {
	/* open */	daopen,
	/* close */	daclose,
	/* read */	physread,
	/* write */	physwrite,
	/* ioctl */	daioctl,
	/* poll */	nopoll,
	/* mmap */	nommap,
	/* strategy */	dastrategy,
	/* name */	"da",
	/* maj */	DA_CDEV_MAJOR,
	/* dump */	dadump,
	/* psize */	nopsize,
	/* flags */	D_DISK,
};

static struct cdevsw dadisk_cdevsw;

static SLIST_HEAD(,da_softc) softc_list;
static struct extend_array *daperiphs;

static int
daopen(dev_t dev, int flags, int fmt, struct thread *td)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	struct disklabel *label;
	struct scsi_read_capacity_data *rcap;
	union ccb *ccb;
	int unit;
	int part;
	int error;
	int s;

	unit = dkunit(dev);
	part = dkpart(dev);
	s = splsoftcam();
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		splx(s);
		return (ENXIO);
	}

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("daopen: dev=%s (unit %d , partition %d)\n", devtoname(dev),
	     unit, part));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0)
		return (error); /* error code from tsleep */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return(ENXIO);
	softc->flags |= DA_FLAG_OPEN;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		/* Invalidate our pack information. */
		disk_invalidate(&softc->disk);
		softc->flags &= ~DA_FLAG_PACK_INVALID;
	}
	splx(s);

	/* Do a read capacity */
	rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
							M_TEMP,
							M_WAITOK);

	ccb = cam_periph_getccb(periph, /*priority*/1);
	scsi_read_capacity(&ccb->csio,
			   /*retries*/4,
			   /*cbfcnp*/dadone,
			   MSG_SIMPLE_Q_TAG,
			   rcap,
			   SSD_FULL_SIZE,
			   /*timeout*/60000);
	ccb->ccb_h.ccb_bp = NULL;

	error = cam_periph_runccb(ccb, daerror,
				  /*cam_flags*/CAM_RETRY_SELTO,
				  /*sense_flags*/SF_RETRY_UA,
				  &softc->device_stats);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);
	xpt_release_ccb(ccb);

	if (error == 0)
		dasetgeom(periph, rcap);

	free(rcap, M_TEMP);

	if (error == 0) {
		struct ccb_getdev cgd;

		/* Build label for whole disk. */
		label = &softc->disk.d_label;
		bzero(label, sizeof(*label));
		label->d_type = DTYPE_SCSI;

		/*
		 * Grab the inquiry data to get the vendor and product names.
		 * Put them in the typename and packname for the label.
		 */
		xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1);
		cgd.ccb_h.func_code = XPT_GDEV_TYPE;
		xpt_action((union ccb *)&cgd);

		strncpy(label->d_typename, cgd.inq_data.vendor,
			min(SID_VENDOR_SIZE, sizeof(label->d_typename)));
		strncpy(label->d_packname, cgd.inq_data.product,
			min(SID_PRODUCT_SIZE, sizeof(label->d_packname)));

		label->d_secsize = softc->params.secsize;
		label->d_nsectors = softc->params.secs_per_track;
		label->d_ntracks = softc->params.heads;
		label->d_ncylinders = softc->params.cylinders;
		label->d_secpercyl = softc->params.heads
				   * softc->params.secs_per_track;
		label->d_secperunit = softc->params.sectors;

		/*
		 * Check to see whether or not the blocksize is set yet.
		 * If it isn't, set it and then clear the blocksize
		 * unavailable flag for the device statistics.
		 */
		if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){
			softc->device_stats.block_size = softc->params.secsize;
			softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE;
		}
	}

	if (error == 0) {
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)
			daprevent(periph, PR_PREVENT);
	}
	cam_periph_unlock(periph);
	return (error);
}

static int
daclose(dev_t dev, int flag, int fmt, struct thread *td)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
		return (error); /* error code from tsleep */
	}

	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		union ccb *ccb;

		ccb = cam_periph_getccb(periph, /*priority*/1);

		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
				  /*sense_flags*/SF_RETRY_UA,
				  &softc->device_stats);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR) {
				int asc, ascq;
				int sense_key, error_code;

				scsi_extract_sense(&ccb->csio.sense_data,
						   &error_code,
						   &sense_key,
						   &asc, &ascq);
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb->csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb->csio.ccb_h.status,
				       ccb->csio.scsi_status);
			}
		}

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

		xpt_release_ccb(ccb);

	}

	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
		daprevent(periph, PR_ALLOW);
		/*
		 * If we've got removable media, mark the blocksize as
		 * unavailable, since it could change when new media is
		 * inserted.
		 */
		softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE;
	}

	softc->flags &= ~DA_FLAG_OPEN;
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a bio and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int unit;
	u_int part;
	int s;

	unit = dkunit(bp->bio_dev);
	part = dkpart(bp->bio_dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct da_softc *)periph->softc;
#if 0
	/*
	 * check it's not too big a transfer for our adapter
	 */
	scsi_minphys(bp, &sd_switch);
#endif

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		splx(s);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioqdisksort(&softc->bio_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
}

/* For 2.2-stable support */
#ifndef ENOIOCTL
#define ENOIOCTL -1
#endif

static int
daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
		return (error); /* error code from tsleep */
	}

	error = cam_periph_ioctl(periph, cmd, addr, daerror);

	cam_periph_unlock(periph);

	return (error);
}

static int
dadump(dev_t dev)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int unit;
	u_int part;
	u_int secsize;
	u_int num;		/* number of sectors to write */
	u_int blknum;
	long blkcnt;
	vm_offset_t addr;
	struct ccb_scsiio csio;
	int dumppages = MAXDUMPPGS;
	int error;
	int i;

	/* toss any characters present prior to dump */
	while (cncheckc() != -1)
		;

	unit = dkunit(dev);
	part = dkpart(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		return (ENXIO);
	}
	softc = (struct da_softc *)periph->softc;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
		return (ENXIO);

	error = disk_dumpcheck(dev, &num, &blknum, &secsize);
	if (error)
		return (error);

	addr = 0;	/* starting address */
	blkcnt = howmany(PAGE_SIZE, secsize);

	while (num > 0) {
		caddr_t va = NULL;

		if ((num / blkcnt) < dumppages)
			dumppages = num / blkcnt;

		for (i = 0; i < dumppages; ++i) {
			vm_offset_t a = addr + (i * PAGE_SIZE);
			if (is_physical_memory(a))
				va = pmap_kenter_temporary(trunc_page(a), i);
			else
				va = pmap_kenter_temporary(trunc_page(0), i);
		}

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/1,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/FALSE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				blknum,
				blkcnt * dumppages,
				/*data_ptr*/(u_int8_t *) va,
				/*dxfer_len*/blkcnt * secsize * dumppages,
				/*sense_len*/SSD_FULL_SIZE,
				DA_DEFAULT_TIMEOUT * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
				scsi_sense_print(&csio);
			else
				printf("status == 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			return(EIO);
		}

		if (dumpstatus(addr, (long)(num * softc->params.secsize)) < 0)
			return (EINTR);

		/* update block count */
		num -= blkcnt * dumppages;
		blknum += blkcnt * dumppages;
		addr += PAGE_SIZE * dumppages;
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR) {
				int asc, ascq;
				int sense_key, error_code;

				scsi_extract_sense(&csio.sense_data,
						   &error_code,
						   &sense_key,
						   &asc, &ascq);
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			}
		}
	}
	return (0);
}

static void
dainit(void)
{
	cam_status status;
	struct cam_path *path;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	daperiphs = cam_extend_new();
	SLIST_INIT(&softc_list);
	if (daperiphs == NULL) {
		printf("da: Failed to alloc extend array!\n");
		return;
	}

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE;
		csa.callback = daasync;
		csa.callback_arg = NULL;
		xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
		xpt_free_path(path);
	}

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {

		/*
		 * Schedule a periodic event to occasionally send an
		 * ordered tag to a device.
		 */
		timeout(dasendorderedtag, NULL,
			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}

static void
daoninvalidate(struct cam_periph *periph)
{
	int s;
	struct da_softc *softc;
	struct bio *q_bp;
	struct ccb_setasync csa;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	softc->flags |= DA_FLAG_PACK_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){
		bioq_remove(&softc->bio_queue, q_bp);
		q_bp->bio_resid = q_bp->bio_bcount;
		biofinish(q_bp, NULL, ENXIO);
	}
	splx(s);

	SLIST_REMOVE(&softc_list, softc, da_softc, links);

	xpt_print_path(periph->path);
	printf("lost device\n");
}

static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	devstat_remove_entry(&softc->device_stats);
	cam_extend_release(daperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	if (softc->dev) {
		disk_destroy(softc->dev);
	}
	free(softc, M_DEVBUF);
}

static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
			       "due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct da_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct da_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		splx(s);
		/* FALLTHROUGH */
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	int s;
	struct da_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("daregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE;
	bioq_init(&softc->bio_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
		softc->flags |= DA_FLAG_TAGGED_QUEUING;

	periph->softc = softc;

	cam_extend_set(daperiphs, periph->unit_number, periph);

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	if (softc->quirks & DA_Q_NO_6_BYTE)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Block our timeout handler while we
	 * add this softc to the dev list.
	 */
	s = splsoftclock();
	SLIST_INSERT_HEAD(&softc_list, softc, links);
	splx(s);

	/*
	 * The DA driver supports a blocksize, but
	 * we don't know the blocksize until we do
	 * a read capacity.  So, set a flag to
	 * indicate that the blocksize is
	 * unavailable right now.  We'll clear the
	 * flag as soon as we've done a read capacity.
	 */
	devstat_add_entry(&softc->device_stats, "da",
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI,
			  DEVSTAT_PRIORITY_DISK);

	/*
	 * Register this media as a disk
	 */
	softc->dev = disk_create(periph->unit_number, &softc->disk, 0,
				 &da_cdevsw, &dadisk_cdevsw);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
	/*
	 * Lock this peripheral until we are setup.
	 * This first call can't block
	 */
	(void)cam_periph_lock(periph, PRIBIO);
	xpt_schedule(periph, /*priority*/5);

	return(CAM_REQ_CMP);
}

static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct bio *bp;
		int s;

		/*
		 * See if there is a buf with work for us to do..
		 */
		s = splbio();
		bp = bioq_first(&softc->bio_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			splx(s);
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			splx(s);
			xpt_release_ccb(start_ccb);
		} else {
			int oldspl;
			u_int8_t tag_code;

			bioq_remove(&softc->bio_queue, bp);

			devstat_start_transaction(&softc->device_stats);

			if ((bp->bio_flags & BIO_ORDERED) != 0
			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~DA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = MSG_ORDERED_Q_TAG;
			} else {
				tag_code = MSG_SIMPLE_Q_TAG;
			}
			scsi_read_write(&start_ccb->csio,
					/*retries*/da_retry_count,
					dadone,
					tag_code,
					bp->bio_cmd == BIO_READ,
					/*byte2*/0,
					softc->minimum_cmd_size,
					bp->bio_pblkno,
					bp->bio_bcount / softc->params.secsize,
					bp->bio_data,
					bp->bio_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					da_default_timeout * 1000);
			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;

			/*
			 * Block out any asynchronous callbacks
			 * while we touch the pending ccb list.
			 */
			oldspl = splcam();
			LIST_INSERT_HEAD(&softc->pending_ccbs,
					 &start_ccb->ccb_h, periph_links.le);
			splx(oldspl);

			/* We expect a unit attention from this device */
			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
				softc->flags &= ~DA_FLAG_RETRY_UA;
			}

			start_ccb->ccb_h.ccb_bp = bp;
			bp = bioq_first(&softc->bio_queue);
			splx(s);

			xpt_action(start_ccb);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
		break;
	}
	case DA_STATE_PROBE:
	{
		struct ccb_scsiio *csio;
		struct scsi_read_capacity_data *rcap;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_NOWAIT);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity(csio,
				   /*retries*/4,
				   dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
		xpt_action(start_ccb);
		break;
	}
	}
}

static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
	case DA_CCB_BUFFER_IO:
	{
		struct bio *bp;
		int oldspl;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct bio *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/* XXX See if this is really a media
					 * change first.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bioq_first(&softc->bio_queue))
					!= NULL) {
					bioq_remove(&softc->bio_queue, q_bp);
					q_bp->bio_resid = q_bp->bio_bcount;
					biofinish(q_bp, NULL, EIO);
				}
				splx(s);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = csio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0) {
					/* Short transfer ??? */
					bp->bio_flags |= BIO_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = csio->resid;
			if (csio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
1332 */ 1333 oldspl = splcam(); 1334 LIST_REMOVE(&done_ccb->ccb_h, periph_links.le); 1335 splx(oldspl); 1336 1337 if (softc->device_stats.busy_count == 0) 1338 softc->flags |= DA_FLAG_WENT_IDLE; 1339 1340 biofinish(bp, &softc->device_stats, 0); 1341 break; 1342 } 1343 case DA_CCB_PROBE: 1344 { 1345 struct scsi_read_capacity_data *rdcap; 1346 char announce_buf[80]; 1347 1348 rdcap = (struct scsi_read_capacity_data *)csio->data_ptr; 1349 1350 if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 1351 struct disk_params *dp; 1352 1353 dasetgeom(periph, rdcap); 1354 dp = &softc->params; 1355 snprintf(announce_buf, sizeof(announce_buf), 1356 "%luMB (%u %u byte sectors: %dH %dS/T %dC)", 1357 (unsigned long) (((u_int64_t)dp->secsize * 1358 dp->sectors) / (1024*1024)), dp->sectors, 1359 dp->secsize, dp->heads, dp->secs_per_track, 1360 dp->cylinders); 1361 } else { 1362 int error; 1363 1364 announce_buf[0] = '\0'; 1365 1366 /* 1367 * Retry any UNIT ATTENTION type errors. They 1368 * are expected at boot. 1369 */ 1370 error = daerror(done_ccb, CAM_RETRY_SELTO, 1371 SF_RETRY_UA|SF_NO_PRINT); 1372 if (error == ERESTART) { 1373 /* 1374 * A retry was scheuled, so 1375 * just return. 1376 */ 1377 return; 1378 } else if (error != 0) { 1379 struct scsi_sense_data *sense; 1380 int asc, ascq; 1381 int sense_key, error_code; 1382 int have_sense; 1383 cam_status status; 1384 struct ccb_getdev cgd; 1385 1386 /* Don't wedge this device's queue */ 1387 status = done_ccb->ccb_h.status; 1388 if ((status & CAM_DEV_QFRZN) != 0) 1389 cam_release_devq(done_ccb->ccb_h.path, 1390 /*relsim_flags*/0, 1391 /*reduction*/0, 1392 /*timeout*/0, 1393 /*getcount_only*/0); 1394 1395 1396 xpt_setup_ccb(&cgd.ccb_h, 1397 done_ccb->ccb_h.path, 1398 /* priority */ 1); 1399 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 1400 xpt_action((union ccb *)&cgd); 1401 1402 if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0) 1403 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0) 1404 || ((status & CAM_AUTOSNS_VALID) == 0)) 1405 have_sense = FALSE; 1406 else 1407 have_sense = TRUE; 1408 1409 if (have_sense) { 1410 sense = &csio->sense_data; 1411 scsi_extract_sense(sense, &error_code, 1412 &sense_key, 1413 &asc, &ascq); 1414 } 1415 /* 1416 * Attach to anything that claims to be a 1417 * direct access or optical disk device, 1418 * as long as it doesn't return a "Logical 1419 * unit not supported" (0x25) error. 1420 */ 1421 if ((have_sense) && (asc != 0x25) 1422 && (error_code == SSD_CURRENT_ERROR)) { 1423 const char *sense_key_desc; 1424 const char *asc_desc; 1425 1426 scsi_sense_desc(sense_key, asc, ascq, 1427 &cgd.inq_data, 1428 &sense_key_desc, 1429 &asc_desc); 1430 snprintf(announce_buf, 1431 sizeof(announce_buf), 1432 "Attempt to query device " 1433 "size failed: %s, %s", 1434 sense_key_desc, 1435 asc_desc); 1436 } else { 1437 if (have_sense) 1438 scsi_sense_print( 1439 &done_ccb->csio); 1440 else { 1441 xpt_print_path(periph->path); 1442 printf("got CAM status %#x\n", 1443 done_ccb->ccb_h.status); 1444 } 1445 1446 xpt_print_path(periph->path); 1447 printf("fatal error, failed" 1448 " to attach to device\n"); 1449 1450 /* 1451 * Free up resources. 1452 */ 1453 cam_periph_invalidate(periph); 1454 } 1455 } 1456 } 1457 free(rdcap, M_TEMP); 1458 if (announce_buf[0] != '\0') 1459 xpt_announce_periph(periph, announce_buf); 1460 softc->state = DA_STATE_NORMAL; 1461 /* 1462 * Since our peripheral may be invalidated by an error 1463 * above or an external event, we must release our CCB 1464 * before releasing the probe lock on the peripheral. 
		 * The peripheral will only go away once the last lock
		 * is removed, and we need it around for the CCB release
		 * operation.
		 */
		xpt_release_ccb(done_ccb);
		cam_periph_unlock(periph);
		return;
	}
	case DA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case DA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;
	return(cam_periph_error(ccb, cam_flags, sense_flags,
				&softc->saved_ccb));
}

static void
daprevent(struct cam_periph *periph, int action)
{
	struct da_softc *softc;
	union ccb *ccb;
	int error;

	softc = (struct da_softc *)periph->softc;

	if (((action == PR_ALLOW)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
	 || ((action == PR_PREVENT)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
		return;
	}

	ccb = cam_periph_getccb(periph, /*priority*/1);

	scsi_prevent(&ccb->csio,
		     /*retries*/1,
		     /*cbfcnp*/dadone,
		     MSG_SIMPLE_Q_TAG,
		     action,
		     SSD_FULL_SIZE,
		     5000);

	error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
				  SF_RETRY_UA, &softc->device_stats);

	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~DA_FLAG_PACK_LOCKED;
		else
			softc->flags |= DA_FLAG_PACK_LOCKED;
	}

	xpt_release_ccb(ccb);
}

static void
dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = scsi_4btoul(rdcap->length);
	dp->sectors = scsi_4btoul(rdcap->addr) + 1;
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	dp->heads = ccg.heads;
	dp->secs_per_track = ccg.secs_per_track;
	dp->cylinders = ccg.cylinders;
}

static void
dasendorderedtag(void *arg)
{
	struct da_softc *softc;
	int s;

	for (softc = SLIST_FIRST(&softc_list);
	     softc != NULL;
	     softc = SLIST_NEXT(softc, links)) {
		s = splsoftcam();
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= DA_FLAG_NEED_OTAG;
		}
		if (softc->device_stats.busy_count > 0)
			softc->flags &= ~DA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
		splx(s);
	}
	/* Queue us up again */
	timeout(dasendorderedtag, NULL,
		(da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL);
}

/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void *arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
		union ccb ccb;

		softc = (struct da_softc *)periph->softc;

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
			continue;

		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);

		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&ccb.csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
				int error_code, sense_key, asc, ascq;

				scsi_extract_sense(&ccb.csio.sense_data,
						   &error_code, &sense_key,
						   &asc, &ascq);

				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb.csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb.ccb_h.status, ccb.csio.scsi_status);
			}
		}

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

	}
}

#else /* !_KERNEL */

/*
 * XXX This is only left out of the kernel build to silence warnings.  If,
 * for some reason this function is used in the kernel, the ifdefs should
 * be moved so it is included both in the kernel and userland.
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */
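
/*
 * Illustrative sketch only, not part of the original driver: one way a
 * userland utility might use the scsi_format_unit() CDB builder above,
 * assuming FreeBSD's libcam interface (cam_open_device(), cam_getccb(),
 * cam_send_ccb(), cam_freeccb(), cam_close_device()).  The function name
 * and device path are hypothetical examples; the block is wrapped in
 * "#if 0" so it is never built.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <camlib.h>
#include <cam/scsi/scsi_message.h>

static int
example_format_unit(const char *devpath)	/* e.g. "/dev/da0" */
{
	struct cam_device *dev;
	union ccb *ccb;
	int error = 0;

	if ((dev = cam_open_device(devpath, O_RDWR)) == NULL)
		return (-1);
	if ((ccb = cam_getccb(dev)) == NULL) {
		cam_close_device(dev);
		return (-1);
	}

	/* Clear everything below the CCB header before filling it in. */
	memset(&(&ccb->ccb_h)[1], 0,
	       sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));

	/* Build a FORMAT UNIT CDB with no parameter list. */
	scsi_format_unit(&ccb->csio,
			 /*retries*/1,
			 /*cbfcnp*/NULL,
			 MSG_SIMPLE_Q_TAG,
			 /*byte2*/0,
			 /*ileave*/0,
			 /*data_ptr*/NULL,
			 /*dxfer_len*/0,
			 SSD_FULL_SIZE,
			 /*timeout*/60 * 60 * 1000);

	if (cam_send_ccb(dev, ccb) < 0
	 || (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		error = -1;

	cam_freeccb(ccb);
	cam_close_device(dev);
	return (error);
}
#endif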