1 /* 2 * Implementation of SCSI Direct Access Peripheral driver for CAM. 3 * 4 * Copyright (c) 1997 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 31 #ifdef _KERNEL 32 #include "opt_hw_wdog.h" 33 #endif /* _KERNEL */ 34 35 #include <sys/param.h> 36 37 #ifdef _KERNEL 38 #include <sys/systm.h> 39 #include <sys/kernel.h> 40 #include <sys/bio.h> 41 #include <sys/sysctl.h> 42 #endif /* _KERNEL */ 43 44 #include <sys/devicestat.h> 45 #include <sys/conf.h> 46 #include <sys/disk.h> 47 #include <sys/eventhandler.h> 48 #include <sys/malloc.h> 49 #include <sys/cons.h> 50 51 #include <machine/md_var.h> 52 53 #include <vm/vm.h> 54 #include <vm/pmap.h> 55 56 #ifndef _KERNEL 57 #include <stdio.h> 58 #include <string.h> 59 #endif /* _KERNEL */ 60 61 #include <cam/cam.h> 62 #include <cam/cam_ccb.h> 63 #include <cam/cam_extend.h> 64 #include <cam/cam_periph.h> 65 #include <cam/cam_xpt_periph.h> 66 67 #include <cam/scsi/scsi_message.h> 68 69 #ifndef _KERNEL 70 #include <cam/scsi/scsi_da.h> 71 #endif /* !_KERNEL */ 72 73 #ifdef _KERNEL 74 typedef enum { 75 DA_STATE_PROBE, 76 DA_STATE_NORMAL 77 } da_state; 78 79 typedef enum { 80 DA_FLAG_PACK_INVALID = 0x001, 81 DA_FLAG_NEW_PACK = 0x002, 82 DA_FLAG_PACK_LOCKED = 0x004, 83 DA_FLAG_PACK_REMOVABLE = 0x008, 84 DA_FLAG_TAGGED_QUEUING = 0x010, 85 DA_FLAG_NEED_OTAG = 0x020, 86 DA_FLAG_WENT_IDLE = 0x040, 87 DA_FLAG_RETRY_UA = 0x080, 88 DA_FLAG_OPEN = 0x100 89 } da_flags; 90 91 typedef enum { 92 DA_Q_NONE = 0x00, 93 DA_Q_NO_SYNC_CACHE = 0x01, 94 DA_Q_NO_6_BYTE = 0x02 95 } da_quirks; 96 97 typedef enum { 98 DA_CCB_PROBE = 0x01, 99 DA_CCB_BUFFER_IO = 0x02, 100 DA_CCB_WAITING = 0x03, 101 DA_CCB_DUMP = 0x04, 102 DA_CCB_TYPE_MASK = 0x0F, 103 DA_CCB_RETRY_UA = 0x10 104 } da_ccb_state; 105 106 /* Offsets into our private area for storing information */ 107 #define ccb_state ppriv_field0 108 #define ccb_bp ppriv_ptr1 109 110 struct disk_params { 111 u_int8_t heads; 112 u_int16_t cylinders; 113 u_int8_t secs_per_track; 114 u_int32_t secsize; /* Number of bytes/sector */ 115 u_int32_t sectors; /* total number sectors */ 116 }; 117 118 struct da_softc { 119 struct bio_queue_head bio_queue; 120 struct devstat device_stats; 121 SLIST_ENTRY(da_softc) links; 122 
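	/*
	 * CCBs we have sent to the device but that have not yet completed;
	 * dastart() inserts entries, dadone() removes them, and daasync()
	 * walks the list after a bus reset or BDR to flag the expected
	 * unit attention for retry.
	 */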
LIST_HEAD(, ccb_hdr) pending_ccbs; 123 da_state state; 124 da_flags flags; 125 da_quirks quirks; 126 int minimum_cmd_size; 127 int ordered_tag_count; 128 struct disk_params params; 129 struct disk disk; 130 union ccb saved_ccb; 131 dev_t dev; 132 }; 133 134 struct da_quirk_entry { 135 struct scsi_inquiry_pattern inq_pat; 136 da_quirks quirks; 137 }; 138 139 static const char quantum[] = "QUANTUM"; 140 static const char microp[] = "MICROP"; 141 142 static struct da_quirk_entry da_quirk_table[] = 143 { 144 { 145 /* 146 * Fujitsu M2513A MO drives. 147 * Tested devices: M2513A2 firmware versions 1200 & 1300. 148 * (dip switch selects whether T_DIRECT or T_OPTICAL device) 149 * Reported by: W.Scholten <whs@xs4all.nl> 150 */ 151 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, 152 /*quirks*/ DA_Q_NO_SYNC_CACHE 153 }, 154 { 155 /* See above. */ 156 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"}, 157 /*quirks*/ DA_Q_NO_SYNC_CACHE 158 }, 159 { 160 /* 161 * This particular Fujitsu drive doesn't like the 162 * synchronize cache command. 163 * Reported by: Tom Jackson <toj@gorilla.net> 164 */ 165 {T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"}, 166 /*quirks*/ DA_Q_NO_SYNC_CACHE 167 168 }, 169 { 170 /* 171 * This drive doesn't like the synchronize cache command 172 * either. Reported by: Matthew Jacob <mjacob@feral.com> 173 * in NetBSD PR kern/6027, August 24, 1998. 174 */ 175 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"}, 176 /*quirks*/ DA_Q_NO_SYNC_CACHE 177 }, 178 { 179 /* 180 * This drive doesn't like the synchronize cache command 181 * either. Reported by: Hellmuth Michaelis (hm@kts.org) 182 * (PR 8882). 183 */ 184 {T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"}, 185 /*quirks*/ DA_Q_NO_SYNC_CACHE 186 }, 187 { 188 /* 189 * Doesn't like the synchronize cache command. 190 * Reported by: Blaz Zupan <blaz@gold.amis.net> 191 */ 192 {T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"}, 193 /*quirks*/ DA_Q_NO_SYNC_CACHE 194 }, 195 { 196 /* 197 * Doesn't like the synchronize cache command. 198 */ 199 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"}, 200 /*quirks*/ DA_Q_NO_SYNC_CACHE 201 }, 202 { 203 /* 204 * Doesn't like the synchronize cache command. 205 */ 206 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"}, 207 /*quirks*/ DA_Q_NO_SYNC_CACHE 208 }, 209 { 210 /* 211 * Doesn't work correctly with 6 byte reads/writes. 212 * Returns illegal request, and points to byte 9 of the 213 * 6-byte CDB. 214 * Reported by: Adam McDougall <bsdx@spawnet.com> 215 */ 216 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"}, 217 /*quirks*/ DA_Q_NO_6_BYTE 218 }, 219 { 220 /* 221 * See above. 222 */ 223 {T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"}, 224 /*quirks*/ DA_Q_NO_6_BYTE 225 }, 226 227 /* Below a list of quirks for USB devices supported by umass. */ 228 { 229 /* 230 * This USB floppy drive uses the UFI command set. This 231 * command set is a derivative of the ATAPI command set and 232 * does not support READ_6 commands only READ_10. It also does 233 * not support sync cache (0x35). 234 */ 235 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Y-E DATA", "USB-FDU", "*"}, 236 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 237 }, 238 { 239 /* Another USB floppy */ 240 {T_DIRECT, SIP_MEDIA_REMOVABLE, "MATSHITA", "FDD CF-VFDU*","*"}, 241 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 242 }, 243 { 244 /* 245 * Sony Memory Stick adapter MSAC-US1 and 246 * Sony PCG-C1VJ Internal Memory Stick Slot (MSC-U01). 247 * Make all sony MS* products use this quirk. 
248 */ 249 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "MS*", "*"}, 250 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 251 }, 252 { 253 /* 254 * Sony Memory Stick adapter for the CLIE series 255 * of PalmOS PDA's 256 */ 257 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "CLIE*", "*"}, 258 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 259 }, 260 { 261 /* 262 * Sony DSC cameras (DSC-S30, DSC-S50, DSC-S70) 263 */ 264 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Sony", "Sony DSC", "*"}, 265 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 266 }, 267 { 268 /* 269 * Maxtor 3000LE USB Drive 270 */ 271 {T_DIRECT, SIP_MEDIA_FIXED, "MAXTOR*", "K040H2*", "*"}, 272 /*quirks*/ DA_Q_NO_6_BYTE 273 }, 274 { 275 {T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "MCF3064AP", "*"}, 276 /*quirks*/ DA_Q_NO_6_BYTE 277 }, 278 { 279 /* 280 * Microtech USB CameraMate 281 */ 282 {T_DIRECT, SIP_MEDIA_REMOVABLE, "eUSB Compact*", "Compact Flash*", "*"}, 283 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 284 }, 285 { 286 /* 287 * The vendor, product and version strings coming from the 288 * controller are null terminated instead of being padded with 289 * spaces. The trailing wildcard character '*' is required. 290 */ 291 {T_DIRECT, SIP_MEDIA_REMOVABLE, "SMSC*", "USB FDC*","*"}, 292 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 293 }, 294 { 295 /* 296 * Olympus digital cameras (C-3040ZOOM, C-2040ZOOM, C-1) 297 */ 298 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "C-*", "*"}, 299 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 300 }, 301 { 302 /* 303 * Olympus digital cameras (D-370) 304 */ 305 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "D-*", "*"}, 306 /*quirks*/ DA_Q_NO_6_BYTE 307 }, 308 { 309 /* 310 * Olympus digital cameras (E-100RS, E-10). 311 */ 312 {T_DIRECT, SIP_MEDIA_REMOVABLE, "OLYMPUS", "E-*", "*"}, 313 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 314 }, 315 { 316 /* 317 * KingByte Pen Drives 318 */ 319 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NO BRAND", "PEN DRIVE", "*"}, 320 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 321 }, 322 { 323 /* 324 * FujiFilm Camera 325 */ 326 {T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJIFILMUSB-DRIVEUNIT", "USB-DRIVEUNIT", "*"}, 327 /*quirks*/ DA_Q_NO_6_BYTE|DA_Q_NO_SYNC_CACHE 328 }, 329 { 330 /* 331 * Nikon Coolpix E775/E995 Cameras 332 */ 333 {T_DIRECT, SIP_MEDIA_REMOVABLE, "NIKON", "NIKON DSC E*", "*"}, 334 /*quirks*/ DA_Q_NO_6_BYTE 335 }, 336 { 337 /* 338 * Nikon Coolpix E885 Camera 339 */ 340 {T_DIRECT, SIP_MEDIA_REMOVABLE, "Nikon", "Digital Camera", "*"}, 341 /*quirks*/ DA_Q_NO_6_BYTE 342 }, 343 { 344 /* 345 * Minolta Dimage 2330 346 */ 347 {T_DIRECT, SIP_MEDIA_REMOVABLE, "MINOLTA", "DIMAGE 2330*", "*"}, 348 /*quirks*/ DA_Q_NO_6_BYTE 349 }, 350 { 351 /* 352 * DIVA USB Mp3 Player. 353 * Doesn't work correctly with 6 byte reads/writes. 
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "DIVA USB", "Media Reader","*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	}
};

static	d_open_t	daopen;
static	d_close_t	daclose;
static	d_strategy_t	dastrategy;
static	d_ioctl_t	daioctl;
static	d_dump_t	dadump;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	void		dasetgeom(struct cam_periph *periph,
				  struct scsi_read_capacity_data *rdcap);
static	timeout_t	dasendorderedtag;
static	void		dashutdown(void *arg, int howto);

#ifndef	DA_DEFAULT_TIMEOUT
#define	DA_DEFAULT_TIMEOUT	60	/* Timeout in seconds */
#endif

#ifndef	DA_DEFAULT_RETRY
#define	DA_DEFAULT_RETRY	4
#endif

static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;

SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
           &da_retry_count, 0, "Normal I/O retry count");
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
           &da_default_timeout, 0, "Normal I/O timeout (in seconds)");

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This covers the worst case,
 * where a starved transaction starts during an interval that
 * passes the "don't send an ordered tag" test, so that it takes
 * two intervals for us to determine that a tag must be sent.
411 */ 412 #ifndef DA_ORDEREDTAG_INTERVAL 413 #define DA_ORDEREDTAG_INTERVAL 4 414 #endif 415 416 static struct periph_driver dadriver = 417 { 418 dainit, "da", 419 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 420 }; 421 422 PERIPHDRIVER_DECLARE(da, dadriver); 423 424 #define DA_CDEV_MAJOR 13 425 426 /* For 2.2-stable support */ 427 #ifndef D_DISK 428 #define D_DISK 0 429 #endif 430 431 static struct cdevsw da_cdevsw = { 432 /* open */ daopen, 433 /* close */ daclose, 434 /* read */ physread, 435 /* write */ physwrite, 436 /* ioctl */ daioctl, 437 /* poll */ nopoll, 438 /* mmap */ nommap, 439 /* strategy */ dastrategy, 440 /* name */ "da", 441 /* maj */ DA_CDEV_MAJOR, 442 /* dump */ dadump, 443 /* psize */ nopsize, 444 /* flags */ D_DISK, 445 }; 446 447 static struct cdevsw dadisk_cdevsw; 448 449 static SLIST_HEAD(,da_softc) softc_list; 450 static struct extend_array *daperiphs; 451 452 static int 453 daopen(dev_t dev, int flags, int fmt, struct thread *td) 454 { 455 struct cam_periph *periph; 456 struct da_softc *softc; 457 struct disklabel *label; 458 struct scsi_read_capacity_data *rcap; 459 union ccb *ccb; 460 int unit; 461 int part; 462 int error; 463 int s; 464 465 unit = dkunit(dev); 466 part = dkpart(dev); 467 s = splsoftcam(); 468 periph = cam_extend_get(daperiphs, unit); 469 if (periph == NULL) { 470 splx(s); 471 return (ENXIO); 472 } 473 474 softc = (struct da_softc *)periph->softc; 475 476 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 477 ("daopen: dev=%s (unit %d , partition %d)\n", devtoname(dev), 478 unit, part)); 479 480 if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) 481 return (error); /* error code from tsleep */ 482 483 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 484 return(ENXIO); 485 softc->flags |= DA_FLAG_OPEN; 486 487 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) { 488 /* Invalidate our pack information. */ 489 disk_invalidate(&softc->disk); 490 softc->flags &= ~DA_FLAG_PACK_INVALID; 491 } 492 splx(s); 493 494 /* Do a read capacity */ 495 rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), 496 M_TEMP, 497 M_WAITOK); 498 499 ccb = cam_periph_getccb(periph, /*priority*/1); 500 scsi_read_capacity(&ccb->csio, 501 /*retries*/4, 502 /*cbfncp*/dadone, 503 MSG_SIMPLE_Q_TAG, 504 rcap, 505 SSD_FULL_SIZE, 506 /*timeout*/60000); 507 ccb->ccb_h.ccb_bp = NULL; 508 509 error = cam_periph_runccb(ccb, daerror, 510 /*cam_flags*/CAM_RETRY_SELTO, 511 /*sense_flags*/SF_RETRY_UA, 512 &softc->device_stats); 513 514 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 515 cam_release_devq(ccb->ccb_h.path, 516 /*relsim_flags*/0, 517 /*reduction*/0, 518 /*timeout*/0, 519 /*getcount_only*/0); 520 xpt_release_ccb(ccb); 521 522 if (error == 0) 523 dasetgeom(periph, rcap); 524 525 free(rcap, M_TEMP); 526 527 if (error == 0) { 528 struct ccb_getdev cgd; 529 530 /* Build label for whole disk. */ 531 label = &softc->disk.d_label; 532 bzero(label, sizeof(*label)); 533 label->d_type = DTYPE_SCSI; 534 535 /* 536 * Grab the inquiry data to get the vendor and product names. 537 * Put them in the typename and packname for the label. 
538 */ 539 xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1); 540 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 541 xpt_action((union ccb *)&cgd); 542 543 strncpy(label->d_typename, cgd.inq_data.vendor, 544 min(SID_VENDOR_SIZE, sizeof(label->d_typename))); 545 strncpy(label->d_packname, cgd.inq_data.product, 546 min(SID_PRODUCT_SIZE, sizeof(label->d_packname))); 547 548 label->d_secsize = softc->params.secsize; 549 label->d_nsectors = softc->params.secs_per_track; 550 label->d_ntracks = softc->params.heads; 551 label->d_ncylinders = softc->params.cylinders; 552 label->d_secpercyl = softc->params.heads 553 * softc->params.secs_per_track; 554 label->d_secperunit = softc->params.sectors; 555 556 /* 557 * Check to see whether or not the blocksize is set yet. 558 * If it isn't, set it and then clear the blocksize 559 * unavailable flag for the device statistics. 560 */ 561 if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){ 562 softc->device_stats.block_size = softc->params.secsize; 563 softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE; 564 } 565 } 566 567 if (error == 0) { 568 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) 569 daprevent(periph, PR_PREVENT); 570 } 571 cam_periph_unlock(periph); 572 return (error); 573 } 574 575 static int 576 daclose(dev_t dev, int flag, int fmt, struct thread *td) 577 { 578 struct cam_periph *periph; 579 struct da_softc *softc; 580 int unit; 581 int error; 582 583 unit = dkunit(dev); 584 periph = cam_extend_get(daperiphs, unit); 585 if (periph == NULL) 586 return (ENXIO); 587 588 softc = (struct da_softc *)periph->softc; 589 590 if ((error = cam_periph_lock(periph, PRIBIO)) != 0) { 591 return (error); /* error code from tsleep */ 592 } 593 594 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 595 union ccb *ccb; 596 597 ccb = cam_periph_getccb(periph, /*priority*/1); 598 599 scsi_synchronize_cache(&ccb->csio, 600 /*retries*/1, 601 /*cbfcnp*/dadone, 602 MSG_SIMPLE_Q_TAG, 603 /*begin_lba*/0,/* Cover the whole disk */ 604 /*lb_count*/0, 605 SSD_FULL_SIZE, 606 5 * 60 * 1000); 607 608 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0, 609 /*sense_flags*/SF_RETRY_UA, 610 &softc->device_stats); 611 612 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 613 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == 614 CAM_SCSI_STATUS_ERROR) { 615 int asc, ascq; 616 int sense_key, error_code; 617 618 scsi_extract_sense(&ccb->csio.sense_data, 619 &error_code, 620 &sense_key, 621 &asc, &ascq); 622 if (sense_key != SSD_KEY_ILLEGAL_REQUEST) 623 scsi_sense_print(&ccb->csio); 624 } else { 625 xpt_print_path(periph->path); 626 printf("Synchronize cache failed, status " 627 "== 0x%x, scsi status == 0x%x\n", 628 ccb->csio.ccb_h.status, 629 ccb->csio.scsi_status); 630 } 631 } 632 633 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 634 cam_release_devq(ccb->ccb_h.path, 635 /*relsim_flags*/0, 636 /*reduction*/0, 637 /*timeout*/0, 638 /*getcount_only*/0); 639 640 xpt_release_ccb(ccb); 641 642 } 643 644 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) { 645 daprevent(periph, PR_ALLOW); 646 /* 647 * If we've got removeable media, mark the blocksize as 648 * unavailable, since it could change when new media is 649 * inserted. 650 */ 651 softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE; 652 } 653 654 softc->flags &= ~DA_FLAG_OPEN; 655 cam_periph_unlock(periph); 656 cam_periph_release(periph); 657 return (0); 658 } 659 660 /* 661 * Actually translate the requested transfer into one the physical driver 662 * can understand. 
The transfer is described by a buf and will include 663 * only one physical transfer. 664 */ 665 static void 666 dastrategy(struct bio *bp) 667 { 668 struct cam_periph *periph; 669 struct da_softc *softc; 670 u_int unit; 671 u_int part; 672 int s; 673 674 unit = dkunit(bp->bio_dev); 675 part = dkpart(bp->bio_dev); 676 periph = cam_extend_get(daperiphs, unit); 677 if (periph == NULL) { 678 biofinish(bp, NULL, ENXIO); 679 return; 680 } 681 softc = (struct da_softc *)periph->softc; 682 #if 0 683 /* 684 * check it's not too big a transfer for our adapter 685 */ 686 scsi_minphys(bp,&sd_switch); 687 #endif 688 689 /* 690 * Mask interrupts so that the pack cannot be invalidated until 691 * after we are in the queue. Otherwise, we might not properly 692 * clean up one of the buffers. 693 */ 694 s = splbio(); 695 696 /* 697 * If the device has been made invalid, error out 698 */ 699 if ((softc->flags & DA_FLAG_PACK_INVALID)) { 700 splx(s); 701 biofinish(bp, NULL, ENXIO); 702 return; 703 } 704 705 /* 706 * Place it in the queue of disk activities for this disk 707 */ 708 bioqdisksort(&softc->bio_queue, bp); 709 710 splx(s); 711 712 /* 713 * Schedule ourselves for performing the work. 714 */ 715 xpt_schedule(periph, /* XXX priority */1); 716 717 return; 718 } 719 720 /* For 2.2-stable support */ 721 #ifndef ENOIOCTL 722 #define ENOIOCTL -1 723 #endif 724 725 static int 726 daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct thread *td) 727 { 728 struct cam_periph *periph; 729 struct da_softc *softc; 730 int unit; 731 int error; 732 733 unit = dkunit(dev); 734 periph = cam_extend_get(daperiphs, unit); 735 if (periph == NULL) 736 return (ENXIO); 737 738 softc = (struct da_softc *)periph->softc; 739 740 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n")); 741 742 if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) { 743 return (error); /* error code from tsleep */ 744 } 745 746 error = cam_periph_ioctl(periph, cmd, addr, daerror); 747 748 cam_periph_unlock(periph); 749 750 return (error); 751 } 752 753 static int 754 dadump(dev_t dev) 755 { 756 struct cam_periph *periph; 757 struct da_softc *softc; 758 u_int unit; 759 u_int part; 760 u_int secsize; 761 u_int num; /* number of sectors to write */ 762 u_int blknum; 763 long blkcnt; 764 vm_offset_t addr; 765 struct ccb_scsiio csio; 766 int dumppages = MAXDUMPPGS; 767 int error; 768 int i; 769 770 /* toss any characters present prior to dump */ 771 while (cncheckc() != -1) 772 ; 773 774 unit = dkunit(dev); 775 part = dkpart(dev); 776 periph = cam_extend_get(daperiphs, unit); 777 if (periph == NULL) { 778 return (ENXIO); 779 } 780 softc = (struct da_softc *)periph->softc; 781 782 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) 783 return (ENXIO); 784 785 error = disk_dumpcheck(dev, &num, &blknum, &secsize); 786 if (error) 787 return (error); 788 789 addr = 0; /* starting address */ 790 blkcnt = howmany(PAGE_SIZE, secsize); 791 792 while (num > 0) { 793 caddr_t va = NULL; 794 795 if ((num / blkcnt) < dumppages) 796 dumppages = num / blkcnt; 797 798 for (i = 0; i < dumppages; ++i) { 799 vm_offset_t a = addr + (i * PAGE_SIZE); 800 if (is_physical_memory(a)) 801 va = pmap_kenter_temporary(trunc_page(a), i); 802 else 803 va = pmap_kenter_temporary(trunc_page(0), i); 804 } 805 806 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1); 807 csio.ccb_h.ccb_state = DA_CCB_DUMP; 808 scsi_read_write(&csio, 809 /*retries*/1, 810 dadone, 811 MSG_ORDERED_Q_TAG, 812 /*read*/FALSE, 813 /*byte2*/0, 814 /*minimum_cmd_size*/ softc->minimum_cmd_size, 815 
blknum, 816 blkcnt * dumppages, 817 /*data_ptr*/(u_int8_t *) va, 818 /*dxfer_len*/blkcnt * secsize * dumppages, 819 /*sense_len*/SSD_FULL_SIZE, 820 DA_DEFAULT_TIMEOUT * 1000); 821 xpt_polled_action((union ccb *)&csio); 822 823 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 824 printf("Aborting dump due to I/O error.\n"); 825 if ((csio.ccb_h.status & CAM_STATUS_MASK) == 826 CAM_SCSI_STATUS_ERROR) 827 scsi_sense_print(&csio); 828 else 829 printf("status == 0x%x, scsi status == 0x%x\n", 830 csio.ccb_h.status, csio.scsi_status); 831 return(EIO); 832 } 833 834 if (dumpstatus(addr, (off_t)num * softc->params.secsize) < 0) 835 return (EINTR); 836 837 /* update block count */ 838 num -= blkcnt * dumppages; 839 blknum += blkcnt * dumppages; 840 addr += PAGE_SIZE * dumppages; 841 } 842 843 /* 844 * Sync the disk cache contents to the physical media. 845 */ 846 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 847 848 xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1); 849 csio.ccb_h.ccb_state = DA_CCB_DUMP; 850 scsi_synchronize_cache(&csio, 851 /*retries*/1, 852 /*cbfcnp*/dadone, 853 MSG_SIMPLE_Q_TAG, 854 /*begin_lba*/0,/* Cover the whole disk */ 855 /*lb_count*/0, 856 SSD_FULL_SIZE, 857 5 * 60 * 1000); 858 xpt_polled_action((union ccb *)&csio); 859 860 if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 861 if ((csio.ccb_h.status & CAM_STATUS_MASK) == 862 CAM_SCSI_STATUS_ERROR) { 863 int asc, ascq; 864 int sense_key, error_code; 865 866 scsi_extract_sense(&csio.sense_data, 867 &error_code, 868 &sense_key, 869 &asc, &ascq); 870 if (sense_key != SSD_KEY_ILLEGAL_REQUEST) 871 scsi_sense_print(&csio); 872 } else { 873 xpt_print_path(periph->path); 874 printf("Synchronize cache failed, status " 875 "== 0x%x, scsi status == 0x%x\n", 876 csio.ccb_h.status, csio.scsi_status); 877 } 878 } 879 } 880 return (0); 881 } 882 883 static void 884 dainit(void) 885 { 886 cam_status status; 887 struct cam_path *path; 888 889 /* 890 * Create our extend array for storing the devices we attach to. 891 */ 892 daperiphs = cam_extend_new(); 893 SLIST_INIT(&softc_list); 894 if (daperiphs == NULL) { 895 printf("da: Failed to alloc extend array!\n"); 896 return; 897 } 898 899 /* 900 * Install a global async callback. This callback will 901 * receive async callbacks like "new device found". 902 */ 903 status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID, 904 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD); 905 906 if (status == CAM_REQ_CMP) { 907 struct ccb_setasync csa; 908 909 xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5); 910 csa.ccb_h.func_code = XPT_SASYNC_CB; 911 csa.event_enable = AC_FOUND_DEVICE; 912 csa.callback = daasync; 913 csa.callback_arg = NULL; 914 xpt_action((union ccb *)&csa); 915 status = csa.ccb_h.status; 916 xpt_free_path(path); 917 } 918 919 if (status != CAM_REQ_CMP) { 920 printf("da: Failed to attach master async callback " 921 "due to status 0x%x!\n", status); 922 } else { 923 924 /* 925 * Schedule a periodic event to occasionally send an 926 * ordered tag to a device. 
927 */ 928 timeout(dasendorderedtag, NULL, 929 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL); 930 931 /* Register our shutdown event handler */ 932 if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown, 933 NULL, SHUTDOWN_PRI_DEFAULT)) == NULL) 934 printf("dainit: shutdown event registration failed!\n"); 935 } 936 } 937 938 static void 939 daoninvalidate(struct cam_periph *periph) 940 { 941 int s; 942 struct da_softc *softc; 943 struct bio *q_bp; 944 struct ccb_setasync csa; 945 946 softc = (struct da_softc *)periph->softc; 947 948 /* 949 * De-register any async callbacks. 950 */ 951 xpt_setup_ccb(&csa.ccb_h, periph->path, 952 /* priority */ 5); 953 csa.ccb_h.func_code = XPT_SASYNC_CB; 954 csa.event_enable = 0; 955 csa.callback = daasync; 956 csa.callback_arg = periph; 957 xpt_action((union ccb *)&csa); 958 959 softc->flags |= DA_FLAG_PACK_INVALID; 960 961 /* 962 * Although the oninvalidate() routines are always called at 963 * splsoftcam, we need to be at splbio() here to keep the buffer 964 * queue from being modified while we traverse it. 965 */ 966 s = splbio(); 967 968 /* 969 * Return all queued I/O with ENXIO. 970 * XXX Handle any transactions queued to the card 971 * with XPT_ABORT_CCB. 972 */ 973 while ((q_bp = bioq_first(&softc->bio_queue)) != NULL){ 974 bioq_remove(&softc->bio_queue, q_bp); 975 q_bp->bio_resid = q_bp->bio_bcount; 976 biofinish(q_bp, NULL, ENXIO); 977 } 978 splx(s); 979 980 SLIST_REMOVE(&softc_list, softc, da_softc, links); 981 982 xpt_print_path(periph->path); 983 printf("lost device\n"); 984 } 985 986 static void 987 dacleanup(struct cam_periph *periph) 988 { 989 struct da_softc *softc; 990 991 softc = (struct da_softc *)periph->softc; 992 993 devstat_remove_entry(&softc->device_stats); 994 cam_extend_release(daperiphs, periph->unit_number); 995 xpt_print_path(periph->path); 996 printf("removing device entry\n"); 997 if (softc->dev) { 998 disk_destroy(softc->dev); 999 } 1000 free(softc, M_DEVBUF); 1001 } 1002 1003 static void 1004 daasync(void *callback_arg, u_int32_t code, 1005 struct cam_path *path, void *arg) 1006 { 1007 struct cam_periph *periph; 1008 1009 periph = (struct cam_periph *)callback_arg; 1010 switch (code) { 1011 case AC_FOUND_DEVICE: 1012 { 1013 struct ccb_getdev *cgd; 1014 cam_status status; 1015 1016 cgd = (struct ccb_getdev *)arg; 1017 if (cgd == NULL) 1018 break; 1019 1020 if (SID_TYPE(&cgd->inq_data) != T_DIRECT 1021 && SID_TYPE(&cgd->inq_data) != T_OPTICAL) 1022 break; 1023 1024 /* 1025 * Allocate a peripheral instance for 1026 * this device and start the probe 1027 * process. 1028 */ 1029 status = cam_periph_alloc(daregister, daoninvalidate, 1030 dacleanup, dastart, 1031 "da", CAM_PERIPH_BIO, 1032 cgd->ccb_h.path, daasync, 1033 AC_FOUND_DEVICE, cgd); 1034 1035 if (status != CAM_REQ_CMP 1036 && status != CAM_REQ_INPROG) 1037 printf("daasync: Unable to attach to new device " 1038 "due to status 0x%x\n", status); 1039 break; 1040 } 1041 case AC_SENT_BDR: 1042 case AC_BUS_RESET: 1043 { 1044 struct da_softc *softc; 1045 struct ccb_hdr *ccbh; 1046 int s; 1047 1048 softc = (struct da_softc *)periph->softc; 1049 s = splsoftcam(); 1050 /* 1051 * Don't fail on the expected unit attention 1052 * that will occur. 
1053 */ 1054 softc->flags |= DA_FLAG_RETRY_UA; 1055 LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le) 1056 ccbh->ccb_state |= DA_CCB_RETRY_UA; 1057 splx(s); 1058 /* FALLTHROUGH*/ 1059 } 1060 default: 1061 cam_periph_async(periph, code, path, arg); 1062 break; 1063 } 1064 } 1065 1066 static cam_status 1067 daregister(struct cam_periph *periph, void *arg) 1068 { 1069 int s; 1070 struct da_softc *softc; 1071 struct ccb_setasync csa; 1072 struct ccb_getdev *cgd; 1073 caddr_t match; 1074 1075 cgd = (struct ccb_getdev *)arg; 1076 if (periph == NULL) { 1077 printf("daregister: periph was NULL!!\n"); 1078 return(CAM_REQ_CMP_ERR); 1079 } 1080 1081 if (cgd == NULL) { 1082 printf("daregister: no getdev CCB, can't register device\n"); 1083 return(CAM_REQ_CMP_ERR); 1084 } 1085 1086 softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT); 1087 1088 if (softc == NULL) { 1089 printf("daregister: Unable to probe new device. " 1090 "Unable to allocate softc\n"); 1091 return(CAM_REQ_CMP_ERR); 1092 } 1093 1094 bzero(softc, sizeof(*softc)); 1095 LIST_INIT(&softc->pending_ccbs); 1096 softc->state = DA_STATE_PROBE; 1097 bioq_init(&softc->bio_queue); 1098 if (SID_IS_REMOVABLE(&cgd->inq_data)) 1099 softc->flags |= DA_FLAG_PACK_REMOVABLE; 1100 if ((cgd->inq_data.flags & SID_CmdQue) != 0) 1101 softc->flags |= DA_FLAG_TAGGED_QUEUING; 1102 1103 periph->softc = softc; 1104 1105 cam_extend_set(daperiphs, periph->unit_number, periph); 1106 1107 /* 1108 * See if this device has any quirks. 1109 */ 1110 match = cam_quirkmatch((caddr_t)&cgd->inq_data, 1111 (caddr_t)da_quirk_table, 1112 sizeof(da_quirk_table)/sizeof(*da_quirk_table), 1113 sizeof(*da_quirk_table), scsi_inquiry_match); 1114 1115 if (match != NULL) 1116 softc->quirks = ((struct da_quirk_entry *)match)->quirks; 1117 else 1118 softc->quirks = DA_Q_NONE; 1119 1120 if (softc->quirks & DA_Q_NO_6_BYTE) 1121 softc->minimum_cmd_size = 10; 1122 else 1123 softc->minimum_cmd_size = 6; 1124 1125 /* 1126 * Block our timeout handler while we 1127 * add this softc to the dev list. 1128 */ 1129 s = splsoftclock(); 1130 SLIST_INSERT_HEAD(&softc_list, softc, links); 1131 splx(s); 1132 1133 /* 1134 * The DA driver supports a blocksize, but 1135 * we don't know the blocksize until we do 1136 * a read capacity. So, set a flag to 1137 * indicate that the blocksize is 1138 * unavailable right now. We'll clear the 1139 * flag as soon as we've done a read capacity. 1140 */ 1141 devstat_add_entry(&softc->device_stats, "da", 1142 periph->unit_number, 0, 1143 DEVSTAT_BS_UNAVAILABLE, 1144 SID_TYPE(&cgd->inq_data) | DEVSTAT_TYPE_IF_SCSI, 1145 DEVSTAT_PRIORITY_DISK); 1146 1147 /* 1148 * Register this media as a disk 1149 */ 1150 softc->dev = disk_create(periph->unit_number, &softc->disk, 0, 1151 &da_cdevsw, &dadisk_cdevsw); 1152 1153 /* 1154 * Add async callbacks for bus reset and 1155 * bus device reset calls. I don't bother 1156 * checking if this fails as, in most cases, 1157 * the system will function just fine without 1158 * them and the only alternative would be to 1159 * not attach the device on failure. 1160 */ 1161 xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5); 1162 csa.ccb_h.func_code = XPT_SASYNC_CB; 1163 csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE; 1164 csa.callback = daasync; 1165 csa.callback_arg = periph; 1166 xpt_action((union ccb *)&csa); 1167 /* 1168 * Lock this peripheral until we are setup. 
1169 * This first call can't block 1170 */ 1171 (void)cam_periph_lock(periph, PRIBIO); 1172 xpt_schedule(periph, /*priority*/5); 1173 1174 return(CAM_REQ_CMP); 1175 } 1176 1177 static void 1178 dastart(struct cam_periph *periph, union ccb *start_ccb) 1179 { 1180 struct da_softc *softc; 1181 1182 softc = (struct da_softc *)periph->softc; 1183 1184 1185 switch (softc->state) { 1186 case DA_STATE_NORMAL: 1187 { 1188 /* Pull a buffer from the queue and get going on it */ 1189 struct bio *bp; 1190 int s; 1191 1192 /* 1193 * See if there is a buf with work for us to do.. 1194 */ 1195 s = splbio(); 1196 bp = bioq_first(&softc->bio_queue); 1197 if (periph->immediate_priority <= periph->pinfo.priority) { 1198 CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE, 1199 ("queuing for immediate ccb\n")); 1200 start_ccb->ccb_h.ccb_state = DA_CCB_WAITING; 1201 SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h, 1202 periph_links.sle); 1203 periph->immediate_priority = CAM_PRIORITY_NONE; 1204 splx(s); 1205 wakeup(&periph->ccb_list); 1206 } else if (bp == NULL) { 1207 splx(s); 1208 xpt_release_ccb(start_ccb); 1209 } else { 1210 int oldspl; 1211 u_int8_t tag_code; 1212 1213 bioq_remove(&softc->bio_queue, bp); 1214 1215 devstat_start_transaction(&softc->device_stats); 1216 1217 if ((bp->bio_flags & BIO_ORDERED) != 0 1218 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) { 1219 softc->flags &= ~DA_FLAG_NEED_OTAG; 1220 softc->ordered_tag_count++; 1221 tag_code = MSG_ORDERED_Q_TAG; 1222 } else { 1223 tag_code = MSG_SIMPLE_Q_TAG; 1224 } 1225 scsi_read_write(&start_ccb->csio, 1226 /*retries*/da_retry_count, 1227 dadone, 1228 tag_code, 1229 bp->bio_cmd == BIO_READ, 1230 /*byte2*/0, 1231 softc->minimum_cmd_size, 1232 bp->bio_pblkno, 1233 bp->bio_bcount / softc->params.secsize, 1234 bp->bio_data, 1235 bp->bio_bcount, 1236 /*sense_len*/SSD_FULL_SIZE, 1237 da_default_timeout * 1000); 1238 start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO; 1239 1240 /* 1241 * Block out any asyncronous callbacks 1242 * while we touch the pending ccb list. 1243 */ 1244 oldspl = splcam(); 1245 LIST_INSERT_HEAD(&softc->pending_ccbs, 1246 &start_ccb->ccb_h, periph_links.le); 1247 splx(oldspl); 1248 1249 /* We expect a unit attention from this device */ 1250 if ((softc->flags & DA_FLAG_RETRY_UA) != 0) { 1251 start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA; 1252 softc->flags &= ~DA_FLAG_RETRY_UA; 1253 } 1254 1255 start_ccb->ccb_h.ccb_bp = bp; 1256 bp = bioq_first(&softc->bio_queue); 1257 splx(s); 1258 1259 xpt_action(start_ccb); 1260 } 1261 1262 if (bp != NULL) { 1263 /* Have more work to do, so ensure we stay scheduled */ 1264 xpt_schedule(periph, /* XXX priority */1); 1265 } 1266 break; 1267 } 1268 case DA_STATE_PROBE: 1269 { 1270 struct ccb_scsiio *csio; 1271 struct scsi_read_capacity_data *rcap; 1272 1273 rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), 1274 M_TEMP, 1275 M_NOWAIT); 1276 if (rcap == NULL) { 1277 printf("dastart: Couldn't malloc read_capacity data\n"); 1278 /* da_free_periph??? 
			 */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity(csio,
				   /*retries*/4,
				   dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
		xpt_action(start_ccb);
		break;
	}
	}
}

static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
	case DA_CCB_BUFFER_IO:
	{
		struct bio *bp;
		int oldspl;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct bio *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack
					 * as invalid.
					 */
					/* XXX See if this is really a media
					 * change first.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
				}

				/*
				 * Return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bioq_first(&softc->bio_queue))
					!= NULL) {
					bioq_remove(&softc->bio_queue, q_bp);
					q_bp->bio_resid = q_bp->bio_bcount;
					biofinish(q_bp, NULL, EIO);
				}
				splx(s);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = csio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0) {
					/* Short transfer ??? */
					bp->bio_flags |= BIO_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = csio->resid;
			if (csio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		if (softc->device_stats.busy_count == 0)
			softc->flags |= DA_FLAG_WENT_IDLE;

		biofinish(bp, &softc->device_stats, 0);
		break;
	}
	case DA_CCB_PROBE:
	{
		struct scsi_read_capacity_data *rdcap;
		char announce_buf[80];

		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct disk_params *dp;

			dasetgeom(periph, rdcap);
			dp = &softc->params;
			snprintf(announce_buf, sizeof(announce_buf),
				"%luMB (%u %u byte sectors: %dH %dS/T %dC)",
				(unsigned long) (((u_int64_t)dp->secsize *
				dp->sectors) / (1024*1024)), dp->sectors,
				dp->secsize, dp->heads, dp->secs_per_track,
				dp->cylinders);
		} else {
			int error;

			announce_buf[0] = '\0';

			/*
			 * Retry any UNIT ATTENTION type errors.  They
			 * are expected at boot.
			 */
			error = daerror(done_ccb, CAM_RETRY_SELTO,
					SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			} else if (error != 0) {
				struct scsi_sense_data *sense;
				int asc, ascq;
				int sense_key, error_code;
				int have_sense;
				cam_status status;
				struct ccb_getdev cgd;

				/* Don't wedge this device's queue */
				status = done_ccb->ccb_h.status;
				if ((status & CAM_DEV_QFRZN) != 0)
					cam_release_devq(done_ccb->ccb_h.path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);

				xpt_setup_ccb(&cgd.ccb_h,
					      done_ccb->ccb_h.path,
					      /* priority */ 1);
				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action((union ccb *)&cgd);

				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
				 || ((status & CAM_AUTOSNS_VALID) == 0))
					have_sense = FALSE;
				else
					have_sense = TRUE;

				if (have_sense) {
					sense = &csio->sense_data;
					scsi_extract_sense(sense, &error_code,
							   &sense_key,
							   &asc, &ascq);
				}
				/*
				 * Attach to anything that claims to be a
				 * direct access or optical disk device,
				 * as long as it doesn't return a "Logical
				 * unit not supported" (0x25) error.
				 */
				if ((have_sense) && (asc != 0x25)
				 && (error_code == SSD_CURRENT_ERROR)) {
					const char *sense_key_desc;
					const char *asc_desc;

					scsi_sense_desc(sense_key, asc, ascq,
							&cgd.inq_data,
							&sense_key_desc,
							&asc_desc);
					snprintf(announce_buf,
					    sizeof(announce_buf),
						"Attempt to query device "
						"size failed: %s, %s",
						sense_key_desc,
						asc_desc);
				} else {
					if (have_sense)
						scsi_sense_print(
							&done_ccb->csio);
					else {
						xpt_print_path(periph->path);
						printf("got CAM status %#x\n",
						       done_ccb->ccb_h.status);
					}

					xpt_print_path(periph->path);
					printf("fatal error, failed"
					       " to attach to device\n");

					/*
					 * Free up resources.
					 */
					cam_periph_invalidate(periph);
				}
			}
		}
		free(rdcap, M_TEMP);
		if (announce_buf[0] != '\0')
			xpt_announce_periph(periph, announce_buf);
		softc->state = DA_STATE_NORMAL;
		/*
		 * Since our peripheral may be invalidated by an error
		 * above or an external event, we must release our CCB
		 * before releasing the probe lock on the peripheral.
1522 * The peripheral will only go away once the last lock 1523 * is removed, and we need it around for the CCB release 1524 * operation. 1525 */ 1526 xpt_release_ccb(done_ccb); 1527 cam_periph_unlock(periph); 1528 return; 1529 } 1530 case DA_CCB_WAITING: 1531 { 1532 /* Caller will release the CCB */ 1533 wakeup(&done_ccb->ccb_h.cbfcnp); 1534 return; 1535 } 1536 case DA_CCB_DUMP: 1537 /* No-op. We're polling */ 1538 return; 1539 default: 1540 break; 1541 } 1542 xpt_release_ccb(done_ccb); 1543 } 1544 1545 static int 1546 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 1547 { 1548 struct da_softc *softc; 1549 struct cam_periph *periph; 1550 1551 periph = xpt_path_periph(ccb->ccb_h.path); 1552 softc = (struct da_softc *)periph->softc; 1553 1554 /* 1555 * XXX 1556 * Until we have a better way of doing pack validation, 1557 * don't treat UAs as errors. 1558 */ 1559 sense_flags |= SF_RETRY_UA; 1560 return(cam_periph_error(ccb, cam_flags, sense_flags, 1561 &softc->saved_ccb)); 1562 } 1563 1564 static void 1565 daprevent(struct cam_periph *periph, int action) 1566 { 1567 struct da_softc *softc; 1568 union ccb *ccb; 1569 int error; 1570 1571 softc = (struct da_softc *)periph->softc; 1572 1573 if (((action == PR_ALLOW) 1574 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) 1575 || ((action == PR_PREVENT) 1576 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { 1577 return; 1578 } 1579 1580 ccb = cam_periph_getccb(periph, /*priority*/1); 1581 1582 scsi_prevent(&ccb->csio, 1583 /*retries*/1, 1584 /*cbcfp*/dadone, 1585 MSG_SIMPLE_Q_TAG, 1586 action, 1587 SSD_FULL_SIZE, 1588 5000); 1589 1590 error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO, 1591 SF_RETRY_UA, &softc->device_stats); 1592 1593 if (error == 0) { 1594 if (action == PR_ALLOW) 1595 softc->flags &= ~DA_FLAG_PACK_LOCKED; 1596 else 1597 softc->flags |= DA_FLAG_PACK_LOCKED; 1598 } 1599 1600 xpt_release_ccb(ccb); 1601 } 1602 1603 static void 1604 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap) 1605 { 1606 struct ccb_calc_geometry ccg; 1607 struct da_softc *softc; 1608 struct disk_params *dp; 1609 1610 softc = (struct da_softc *)periph->softc; 1611 1612 dp = &softc->params; 1613 dp->secsize = scsi_4btoul(rdcap->length); 1614 dp->sectors = scsi_4btoul(rdcap->addr) + 1; 1615 /* 1616 * Have the controller provide us with a geometry 1617 * for this disk. The only time the geometry 1618 * matters is when we boot and the controller 1619 * is the only one knowledgeable enough to come 1620 * up with something that will make this a bootable 1621 * device. 
1622 */ 1623 xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1); 1624 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; 1625 ccg.block_size = dp->secsize; 1626 ccg.volume_size = dp->sectors; 1627 ccg.heads = 0; 1628 ccg.secs_per_track = 0; 1629 ccg.cylinders = 0; 1630 xpt_action((union ccb*)&ccg); 1631 dp->heads = ccg.heads; 1632 dp->secs_per_track = ccg.secs_per_track; 1633 dp->cylinders = ccg.cylinders; 1634 } 1635 1636 static void 1637 dasendorderedtag(void *arg) 1638 { 1639 struct da_softc *softc; 1640 int s; 1641 1642 for (softc = SLIST_FIRST(&softc_list); 1643 softc != NULL; 1644 softc = SLIST_NEXT(softc, links)) { 1645 s = splsoftcam(); 1646 if ((softc->ordered_tag_count == 0) 1647 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) { 1648 softc->flags |= DA_FLAG_NEED_OTAG; 1649 } 1650 if (softc->device_stats.busy_count > 0) 1651 softc->flags &= ~DA_FLAG_WENT_IDLE; 1652 1653 softc->ordered_tag_count = 0; 1654 splx(s); 1655 } 1656 /* Queue us up again */ 1657 timeout(dasendorderedtag, NULL, 1658 (da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL); 1659 } 1660 1661 /* 1662 * Step through all DA peripheral drivers, and if the device is still open, 1663 * sync the disk cache to physical media. 1664 */ 1665 static void 1666 dashutdown(void * arg, int howto) 1667 { 1668 struct cam_periph *periph; 1669 struct da_softc *softc; 1670 1671 TAILQ_FOREACH(periph, &dadriver.units, unit_links) { 1672 union ccb ccb; 1673 softc = (struct da_softc *)periph->softc; 1674 1675 /* 1676 * We only sync the cache if the drive is still open, and 1677 * if the drive is capable of it.. 1678 */ 1679 if (((softc->flags & DA_FLAG_OPEN) == 0) 1680 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) 1681 continue; 1682 1683 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1); 1684 1685 ccb.ccb_h.ccb_state = DA_CCB_DUMP; 1686 scsi_synchronize_cache(&ccb.csio, 1687 /*retries*/1, 1688 /*cbfcnp*/dadone, 1689 MSG_SIMPLE_Q_TAG, 1690 /*begin_lba*/0, /* whole disk */ 1691 /*lb_count*/0, 1692 SSD_FULL_SIZE, 1693 60 * 60 * 1000); 1694 1695 xpt_polled_action(&ccb); 1696 1697 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1698 if (((ccb.ccb_h.status & CAM_STATUS_MASK) == 1699 CAM_SCSI_STATUS_ERROR) 1700 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){ 1701 int error_code, sense_key, asc, ascq; 1702 1703 scsi_extract_sense(&ccb.csio.sense_data, 1704 &error_code, &sense_key, 1705 &asc, &ascq); 1706 1707 if (sense_key != SSD_KEY_ILLEGAL_REQUEST) 1708 scsi_sense_print(&ccb.csio); 1709 } else { 1710 xpt_print_path(periph->path); 1711 printf("Synchronize cache failed, status " 1712 "== 0x%x, scsi status == 0x%x\n", 1713 ccb.ccb_h.status, ccb.csio.scsi_status); 1714 } 1715 } 1716 1717 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) 1718 cam_release_devq(ccb.ccb_h.path, 1719 /*relsim_flags*/0, 1720 /*reduction*/0, 1721 /*timeout*/0, 1722 /*getcount_only*/0); 1723 1724 } 1725 } 1726 1727 #else /* !_KERNEL */ 1728 1729 /* 1730 * XXX This is only left out of the kernel build to silence warnings. If, 1731 * for some reason this function is used in the kernel, the ifdefs should 1732 * be moved so it is included both in the kernel and userland. 
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */
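
/*
 * Illustrative only: a minimal sketch (kept under #if 0, not compiled) of
 * how the userland scsi_format_unit() above could be driven through the
 * pass(4) driver via libcam.  The device path, helper name, and error
 * handling here are assumptions for the example, not part of this driver.
 */
#if 0
#include <err.h>
#include <fcntl.h>
#include <string.h>
#include <camlib.h>

static int
example_format_unit(const char *pass_dev)	/* e.g. "/dev/pass0" (assumed) */
{
	struct cam_device *dev;
	union ccb *ccb;
	int error = 0;

	if ((dev = cam_open_device(pass_dev, O_RDWR)) == NULL) {
		warnx("%s", cam_errbuf);
		return (1);
	}
	if ((ccb = cam_getccb(dev)) == NULL) {
		cam_close_device(dev);
		return (1);
	}

	/* Clear everything below the CCB header, as pass-through users do. */
	memset(&(&ccb->ccb_h)[1], 0,
	       sizeof(struct ccb_scsiio) - sizeof(struct ccb_hdr));

	/* Build a FORMAT UNIT with no parameter list and a long timeout. */
	scsi_format_unit(&ccb->csio,
			 /*retries*/1,
			 /*cbfcnp*/NULL,
			 /*tag_action*/MSG_SIMPLE_Q_TAG,
			 /*byte2*/0,
			 /*ileave*/0,
			 /*data_ptr*/NULL,
			 /*dxfer_len*/0,
			 /*sense_len*/SSD_FULL_SIZE,
			 /*timeout*/60 * 60 * 1000);

	if (cam_send_ccb(dev, ccb) < 0 ||
	    (ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
		error = 1;

	cam_freeccb(ccb);
	cam_close_device(dev);
	return (error);
}
#endif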