/*-
 * Implementation of SCSI Direct Access Peripheral driver for CAM.
 *
 * Copyright (c) 1997 Justin T. Gibbs.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#endif /* _KERNEL */

#include <sys/devicestat.h>
#include <sys/conf.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>

#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <geom/geom_disk.h>

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <cam/scsi/scsi_message.h>

#ifndef _KERNEL
#include <cam/scsi/scsi_da.h>
#endif /* !_KERNEL */

#ifdef _KERNEL
typedef enum {
	DA_STATE_PROBE,
	DA_STATE_PROBE2,
	DA_STATE_NORMAL
} da_state;

typedef enum {
	DA_FLAG_PACK_INVALID	= 0x001,
	DA_FLAG_NEW_PACK	= 0x002,
	DA_FLAG_PACK_LOCKED	= 0x004,
	DA_FLAG_PACK_REMOVABLE	= 0x008,
	DA_FLAG_TAGGED_QUEUING	= 0x010,
	DA_FLAG_NEED_OTAG	= 0x020,
	DA_FLAG_WENT_IDLE	= 0x040,
	DA_FLAG_RETRY_UA	= 0x080,
	DA_FLAG_OPEN		= 0x100,
	DA_FLAG_SCTX_INIT	= 0x200
} da_flags;

typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01,
	DA_Q_NO_6_BYTE		= 0x02,
	DA_Q_NO_PREVENT		= 0x04
} da_quirks;

typedef enum {
	DA_CCB_PROBE		= 0x01,
	DA_CCB_PROBE2		= 0x02,
	DA_CCB_BUFFER_IO	= 0x03,
	DA_CCB_WAITING		= 0x04,
	DA_CCB_DUMP		= 0x05,
	DA_CCB_TYPE_MASK	= 0x0F,
	DA_CCB_RETRY_UA		= 0x10
} da_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int32_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int64_t sectors;	/* total number sectors */
};

struct da_softc {
	struct	 bio_queue_head bio_queue;
	SLIST_ENTRY(da_softc) links;
	LIST_HEAD(, ccb_hdr) pending_ccbs;
	da_state state;
	da_flags flags;
	da_quirks quirks;
	int	 minimum_cmd_size;
	int	 ordered_tag_count;
	int	 outstanding_cmds;
	struct	 disk_params params;
	struct	 disk *disk;
	union	 ccb saved_ccb;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
};

struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

static const char quantum[] = "QUANTUM";
static const char microp[] = "MICROP";

static struct da_quirk_entry da_quirk_table[] =
{
	/* SPI, FC devices */
	{
		/*
		 * Fujitsu M2513A MO drives.
		 * Tested devices: M2513A2 firmware versions 1200 & 1300.
		 * (dip switch selects whether T_DIRECT or T_OPTICAL device)
		 * Reported by: W.Scholten <whs@xs4all.nl>
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/* See above. */
		{T_OPTICAL, SIP_MEDIA_REMOVABLE, "FUJITSU", "M2513A", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
		 * Reported by: Tom Jackson <toj@gorilla.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
		 * in NetBSD PR kern/6027, August 24, 1998.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2217*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
		 * (PR 8882).
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, microp, "2112*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "MAVERICK 540S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS525S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: walter@pelissero.de
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "LPS540S", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't work correctly with 6 byte reads/writes.
		 * Returns illegal request, and points to byte 9 of the
		 * 6-byte CDB.
		 * Reported by: Adam McDougall <bsdx@spawnet.com>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 4*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/* See above. */
		{T_DIRECT, SIP_MEDIA_FIXED, quantum, "VIKING 2*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: walter@pelissero.de
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "CONNER", "CP3500*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * The CISS RAID controllers do not support SYNC_CACHE
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "COMPAQ", "RAID*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	/* USB mass storage devices supported by umass(4) */
	{
		/*
		 * EXATELECOM (Sigmatel) i-Bead 100/105 USB Flash MP3 Player
		 * PR: kern/51675
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "EXATEL", "i-BEAD10*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Power Quotient Int. (PQI) USB flash key
		 * PR: kern/53067
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "Generic*", "USB Flash Disk*",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Creative Nomad MUVO mp3 player (USB)
		 * PR: kern/53094
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "CREATIVE", "NOMAD_MUVO", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
	},
	{
		/*
		 * Jungsoft NEXDISK USB flash key
		 * PR: kern/54737
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "JUNGSOFT", "NEXDISK*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * FreeDik USB Mini Data Drive
		 * PR: kern/54786
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FreeDik*", "Mini Data Drive",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Sigmatel USB Flash MP3 Player
		 * PR: kern/57046
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SigmaTel", "MSCN", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
	},
	{
		/*
		 * Neuros USB Digital Audio Computer
		 * PR: kern/63645
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "NEUROS", "dig. audio comp.",
		 "*"}, /*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * SEAGRAND NP-900 MP3 Player
		 * PR: kern/64563
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "SEAGRAND", "NP-900*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE|DA_Q_NO_PREVENT
	},
	{
		/*
		 * iRiver iFP MP3 player (with UMS Firmware)
		 * PR: kern/54881, i386/63941, kern/66124
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "iRiver", "iFP*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Frontier Labs NEX IA+ Digital Audio Player, rev 1.10/0.01
		 * PR: kern/70158
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "FL" , "Nex*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * ZICPlay USB MP3 Player with FM
		 * PR: kern/75057
		 */
		{T_DIRECT, SIP_MEDIA_REMOVABLE, "ACTIONS*" , "USB DISK*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
};

static	disk_strategy_t	dastrategy;
static	dumper_t	dadump;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		dasysctlinit(void *context, int pending);
static	int		dacmdsizesysctl(SYSCTL_HANDLER_ARGS);
static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	int		dagetcapacity(struct cam_periph *periph);
static	void		dasetgeom(struct cam_periph *periph, uint32_t block_len,
				  uint64_t maxsector);
static	timeout_t	dasendorderedtag;
static	void		dashutdown(void *arg, int howto);

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

#ifndef	DA_DEFAULT_RETRY
#define	DA_DEFAULT_RETRY	4
#endif

static int da_retry_count = DA_DEFAULT_RETRY;
static int da_default_timeout = DA_DEFAULT_TIMEOUT;

SYSCTL_NODE(_kern_cam, OID_AUTO, da, CTLFLAG_RD, 0,
	    "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_da, OID_AUTO, retry_count, CTLFLAG_RW,
	   &da_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.da.retry_count", &da_retry_count);
SYSCTL_INT(_kern_cam_da, OID_AUTO, default_timeout, CTLFLAG_RW,
	   &da_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.da.default_timeout", &da_default_timeout);

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * passes the "don't send an ordered tag" test, so it takes us two
 * intervals to determine that a tag must be sent.
 */
#ifndef DA_ORDEREDTAG_INTERVAL
#define DA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver dadriver =
{
	dainit, "da",
	TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(da, dadriver);

static SLIST_HEAD(,da_softc) softc_list;

static int
daopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;
	int s;

	s = splsoftcam();
	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		splx(s);
		return (ENXIO);
	}
	unit = periph->unit_number;

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("daopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0)
		return (error); /* error code from tsleep */

	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return(ENXIO);
	softc->flags |= DA_FLAG_OPEN;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) {
		/* Invalidate our pack information. */
		softc->flags &= ~DA_FLAG_PACK_INVALID;
	}
	splx(s);

	error = dagetcapacity(periph);

	if (error == 0) {

		softc->disk->d_sectorsize = softc->params.secsize;
		softc->disk->d_mediasize = softc->params.secsize * (off_t)softc->params.sectors;
		/* XXX: these are not actually "firmware" values, so they may be wrong */
		softc->disk->d_fwsectors = softc->params.secs_per_track;
		softc->disk->d_fwheads = softc->params.heads;
		softc->disk->d_devstat->block_size = softc->params.secsize;
		softc->disk->d_devstat->flags &= ~DEVSTAT_BS_UNAVAILABLE;
	}

	if (error == 0) {
		if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0 &&
		    (softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_PREVENT);
	} else {
		softc->flags &= ~DA_FLAG_OPEN;
		cam_periph_release(periph);
	}
	cam_periph_unlock(periph);
	return (error);
}

static int
daclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	da_softc *softc;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	if ((error = cam_periph_lock(periph, PRIBIO)) != 0) {
		return (error); /* error code from tsleep */
	}

	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {
		union	ccb *ccb;

		ccb = cam_periph_getccb(periph, /*priority*/1);

		scsi_synchronize_cache(&ccb->csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);

		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
				  /*sense_flags*/SF_RETRY_UA,
				  softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((ccb->ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR) {
				int asc, ascq;
				int sense_key, error_code;

				scsi_extract_sense(&ccb->csio.sense_data,
						   &error_code,
						   &sense_key,
						   &asc, &ascq);
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb->csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb->csio.ccb_h.status,
				       ccb->csio.scsi_status);
			}
		}

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

		xpt_release_ccb(ccb);

	}

	if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) {
		if ((softc->quirks & DA_Q_NO_PREVENT) == 0)
			daprevent(periph, PR_ALLOW);
		/*
		 * If we've got removable media, mark the blocksize as
		 * unavailable, since it could change when new media is
		 * inserted.
		 */
		softc->disk->d_devstat->flags |= DEVSTAT_BS_UNAVAILABLE;
	}

	softc->flags &= ~DA_FLAG_OPEN;
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
dastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int s;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct da_softc *)periph->softc;
#if 0
	/*
	 * check it's not too big a transfer for our adapter
	 */
	scsi_minphys(bp,&sd_switch);
#endif

	/*
	 * Mask interrupts so that the pack cannot be invalidated until
	 * after we are in the queue.  Otherwise, we might not properly
	 * clean up one of the buffers.
	 */
	s = splbio();

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & DA_FLAG_PACK_INVALID)) {
		splx(s);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_disksort(&softc->bio_queue, bp);

	splx(s);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
}

static int
dadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	u_int secsize;
	struct ccb_scsiio csio;
	struct disk *dp;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct da_softc *)periph->softc;
	secsize = softc->params.secsize;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0)
		return (ENXIO);

	if (length > 0) {
		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/1,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/FALSE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				offset / secsize,
				length / secsize,
				/*data_ptr*/(u_int8_t *) virtual,
				/*dxfer_len*/length,
				/*sense_len*/SSD_FULL_SIZE,
				DA_DEFAULT_TIMEOUT * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
				scsi_sense_print(&csio);
			else
				printf("status == 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			return(EIO);
		}
		return(0);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR) {
				int asc, ascq;
				int sense_key, error_code;

				scsi_extract_sense(&csio.sense_data,
						   &error_code,
						   &sense_key,
						   &asc, &ascq);
				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			}
		}
	}
	return (0);
}

static void
dainit(void)
{
	cam_status status;
	struct cam_path *path;

	SLIST_INIT(&softc_list);

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE;
		csa.callback = daasync;
		csa.callback_arg = NULL;
		xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
		xpt_free_path(path);
	}

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {

		/*
		 * Schedule a periodic event to occasionally send an
		 * ordered tag to a device.
		 */
		timeout(dasendorderedtag, NULL,
			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, dashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("dainit: shutdown event registration failed!\n");
	}
}

static void
daoninvalidate(struct cam_periph *periph)
{
	int s;
	struct da_softc *softc;
	struct ccb_setasync csa;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	softc->flags |= DA_FLAG_PACK_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);
	splx(s);

	SLIST_REMOVE(&softc_list, softc, da_softc, links);

	xpt_print_path(periph->path);
	printf("lost device\n");
}

static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	xpt_print_path(periph->path);
	printf("removing device entry\n");
	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & DA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print_path(periph->path);
		printf("can't remove sysctl context\n");
	}
	disk_destroy(softc->disk);
	free(softc, M_DEVBUF);
}

static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (SID_TYPE(&cgd->inq_data) != T_DIRECT
		    && SID_TYPE(&cgd->inq_data) != T_RBC
		    && SID_TYPE(&cgd->inq_data) != T_OPTICAL)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
			       "due to status 0x%x\n", status);
		break;
	}
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct da_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct da_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		LIST_FOREACH(ccbh, &softc->pending_ccbs, periph_links.le)
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		splx(s);
		/* FALLTHROUGH*/
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static void
dasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;
	softc = (struct da_softc *)periph->softc;

	snprintf(tmpstr, sizeof(tmpstr), "CAM DA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	mtx_lock(&Giant);
	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= DA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_da), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("dasysctlinit: unable to allocate sysctl tree\n");
		return;
	}

	/*
	 * Now register the sysctl handler, so the user can change the value
	 * on the fly.
	 */
	SYSCTL_ADD_PROC(&softc->sysctl_ctx,SYSCTL_CHILDREN(softc->sysctl_tree),
		OID_AUTO, "minimum_cmd_size", CTLTYPE_INT | CTLFLAG_RW,
		&softc->minimum_cmd_size, 0, dacmdsizesysctl, "I",
		"Minimum CDB size");

	mtx_unlock(&Giant);
}

static int
dacmdsizesysctl(SYSCTL_HANDLER_ARGS)
{
	int error, value;

	value = *(int *)arg1;

	error = sysctl_handle_int(oidp, &value, 0, req);

	if ((error != 0)
	 || (req->newptr == NULL))
		return (error);

	/*
	 * Acceptable values here are 6, 10, 12 or 16.
	 */
	if (value < 6)
		value = 6;
	else if ((value > 6)
	      && (value <= 10))
		value = 10;
	else if ((value > 10)
	      && (value <= 12))
		value = 12;
	else if (value > 12)
		value = 16;

	*(int *)arg1 = value;

	return (0);
}

static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	int s;
	struct da_softc *softc;
	struct ccb_setasync csa;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char tmpstr[80];
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("daregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE;
	bioq_init(&softc->bio_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
		softc->flags |= DA_FLAG_TAGGED_QUEUING;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Check if the SIM does not want 6 byte commands */
	xpt_setup_ccb(&cpi.ccb_h, periph->path, /*priority*/1);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status == CAM_REQ_CMP && (cpi.hba_misc & PIM_NO_6_BYTE))
		softc->quirks |= DA_Q_NO_6_BYTE;

	TASK_INIT(&softc->sysctl_task, 0, dasysctlinit, periph);

	/*
	 * RBC devices don't have to support READ(6), only READ(10).
	 */
	if (softc->quirks & DA_Q_NO_6_BYTE || SID_TYPE(&cgd->inq_data) == T_RBC)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Load the user's default, if any.
	 */
	snprintf(tmpstr, sizeof(tmpstr), "kern.cam.da.%d.minimum_cmd_size",
		 periph->unit_number);
	TUNABLE_INT_FETCH(tmpstr, &softc->minimum_cmd_size);

	/*
	 * 6, 10, 12 and 16 are the currently permissible values.
	 */
	if (softc->minimum_cmd_size < 6)
		softc->minimum_cmd_size = 6;
	else if ((softc->minimum_cmd_size > 6)
	      && (softc->minimum_cmd_size <= 10))
		softc->minimum_cmd_size = 10;
	else if ((softc->minimum_cmd_size > 10)
	      && (softc->minimum_cmd_size <= 12))
		softc->minimum_cmd_size = 12;
	else if (softc->minimum_cmd_size > 12)
		softc->minimum_cmd_size = 16;

	/*
	 * Block our timeout handler while we
	 * add this softc to the dev list.
	 */
	s = splsoftclock();
	SLIST_INSERT_HEAD(&softc_list, softc, links);
	splx(s);

	/*
	 * Register this media as a disk
	 */

	softc->disk = disk_alloc();
	softc->disk->d_open = daopen;
	softc->disk->d_close = daclose;
	softc->disk->d_strategy = dastrategy;
	softc->disk->d_dump = dadump;
	softc->disk->d_name = "da";
	softc->disk->d_drv1 = periph;
	softc->disk->d_maxsize = DFLTPHYS; /* XXX: probably not arbitrary */
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = DISKFLAG_NEEDSGIANT;
	disk_create(softc->disk, DISK_VERSION);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);
	/*
	 * Lock this peripheral until we are setup.
	 * This first call can't block
	 */
	(void)cam_periph_lock(periph, PRIBIO);
	xpt_schedule(periph, /*priority*/5);

	return(CAM_REQ_CMP);
}

static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct bio *bp;
		int s;

		/*
		 * See if there is a buf with work for us to do..
		 */
		s = splbio();
		bp = bioq_first(&softc->bio_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			splx(s);
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			splx(s);
			xpt_release_ccb(start_ccb);
		} else {
			int oldspl;
			u_int8_t tag_code;

			bioq_remove(&softc->bio_queue, bp);

			if ((softc->flags & DA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~DA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = MSG_ORDERED_Q_TAG;
			} else {
				tag_code = MSG_SIMPLE_Q_TAG;
			}
			scsi_read_write(&start_ccb->csio,
					/*retries*/da_retry_count,
					/*cbfcnp*/dadone,
					/*tag_action*/tag_code,
					/*read_op*/bp->bio_cmd == BIO_READ,
					/*byte2*/0,
					softc->minimum_cmd_size,
					/*lba*/bp->bio_pblkno,
					/*block_count*/bp->bio_bcount /
					softc->params.secsize,
					/*data_ptr*/ bp->bio_data,
					/*dxfer_len*/ bp->bio_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					/*timeout*/da_default_timeout*1000);
			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;

			/*
			 * Block out any asynchronous callbacks
			 * while we touch the pending ccb list.
			 */
			oldspl = splcam();
			LIST_INSERT_HEAD(&softc->pending_ccbs,
					 &start_ccb->ccb_h, periph_links.le);
			softc->outstanding_cmds++;
			splx(oldspl);

			/* We expect a unit attention from this device */
			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
				softc->flags &= ~DA_FLAG_RETRY_UA;
			}

			start_ccb->ccb_h.ccb_bp = bp;
			bp = bioq_first(&softc->bio_queue);
			splx(s);

			xpt_action(start_ccb);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
		break;
	}
	case DA_STATE_PROBE:
	{
		struct ccb_scsiio *csio;
		struct scsi_read_capacity_data *rcap;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_NOWAIT);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity(csio,
				   /*retries*/4,
				   dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
		xpt_action(start_ccb);
		break;
	}
	case DA_STATE_PROBE2:
	{
		struct ccb_scsiio *csio;
		struct scsi_read_capacity_data_long *rcaplong;

		rcaplong = (struct scsi_read_capacity_data_long *)
			malloc(sizeof(*rcaplong), M_TEMP, M_NOWAIT);
		if (rcaplong == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity_16(csio,
				      /*retries*/ 4,
				      /*cbfcnp*/ dadone,
				      /*tag_action*/ MSG_SIMPLE_Q_TAG,
				      /*lba*/ 0,
				      /*reladr*/ 0,
				      /*pmi*/ 0,
				      rcaplong,
				      /*sense_len*/ SSD_FULL_SIZE,
				      /*timeout*/ 60000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE2;
		xpt_action(start_ccb);
		break;
	}
	}
}

static int
cmd6workaround(union ccb *ccb)
{
	struct scsi_rw_6 cmd6;
	struct scsi_rw_10 *cmd10;
	struct da_softc *softc;
	u_int8_t *cdb;
	int frozen;

	cdb = ccb->csio.cdb_io.cdb_bytes;

	/* Translation only possible if CDB is an array and cmd is R/W6 */
	if ((ccb->ccb_h.flags & CAM_CDB_POINTER) != 0 ||
	    (*cdb != READ_6 && *cdb != WRITE_6))
		return 0;

	xpt_print_path(ccb->ccb_h.path);
	printf("READ(6)/WRITE(6) not supported, "
	       "increasing minimum_cmd_size to 10.\n");
	softc = (struct da_softc *)xpt_path_periph(ccb->ccb_h.path)->softc;
	softc->minimum_cmd_size = 10;

	bcopy(cdb, &cmd6, sizeof(struct scsi_rw_6));
	cmd10 = (struct scsi_rw_10 *)cdb;
	cmd10->opcode = (cmd6.opcode == READ_6) ? READ_10 : WRITE_10;
	cmd10->byte2 = 0;
	scsi_ulto4b(scsi_3btoul(cmd6.addr), cmd10->addr);
	cmd10->reserved = 0;
	scsi_ulto2b(cmd6.length, cmd10->length);
	cmd10->control = cmd6.control;
	ccb->csio.cdb_len = sizeof(*cmd10);

	/* Requeue request, unfreezing queue if necessary */
	frozen = (ccb->ccb_h.status & CAM_DEV_QFRZN) != 0;
	ccb->ccb_h.status = CAM_REQUEUE_REQ;
	xpt_action(ccb);
	if (frozen) {
		cam_release_devq(ccb->ccb_h.path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);
	}
	return (ERESTART);
}

static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
	case DA_CCB_BUFFER_IO:
	{
		struct bio *bp;
		int oldspl;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			error = daerror(done_ccb, CAM_RETRY_SELTO, sf);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error. Mark our pack as
					 * invalid.
					 */
					/* XXX See if this is really a media
					 * change first.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				bioq_flush(&softc->bio_queue, NULL, EIO);
				splx(s);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = csio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = csio->resid;
			if (csio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= DA_FLAG_WENT_IDLE;
		splx(oldspl);

		biodone(bp);
		break;
	}
	case DA_CCB_PROBE:
	case DA_CCB_PROBE2:
	{
		struct scsi_read_capacity_data *rdcap;
		struct scsi_read_capacity_data_long *rcaplong;
		char announce_buf[80];

		rdcap = NULL;
		rcaplong = NULL;
		if (softc->state == DA_STATE_PROBE)
			rdcap =(struct scsi_read_capacity_data *)csio->data_ptr;
		else
			rcaplong = (struct scsi_read_capacity_data_long *)
				csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct disk_params *dp;
			uint32_t block_size;
			uint64_t maxsector;

			if (softc->state == DA_STATE_PROBE) {
				block_size = scsi_4btoul(rdcap->length);
				maxsector = scsi_4btoul(rdcap->addr);

				/*
				 * According to SBC-2, if the standard 10
				 * byte READ CAPACITY command returns 2^32,
				 * we should issue the 16 byte version of
				 * the command, since the device in question
				 * has more sectors than can be represented
				 * with the short version of the command.
				 */
				if (maxsector == 0xffffffff) {
					softc->state = DA_STATE_PROBE2;
					free(rdcap, M_TEMP);
					xpt_release_ccb(done_ccb);
					xpt_schedule(periph, /*priority*/5);
					return;
				}
			} else {
				block_size = scsi_4btoul(rcaplong->length);
				maxsector = scsi_8btou64(rcaplong->addr);
			}
			dasetgeom(periph, block_size, maxsector);
			dp = &softc->params;
			snprintf(announce_buf, sizeof(announce_buf),
				 "%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
				 (uintmax_t) (((uintmax_t)dp->secsize *
				 dp->sectors) / (1024*1024)),
				 (uintmax_t)dp->sectors,
				 dp->secsize, dp->heads, dp->secs_per_track,
				 dp->cylinders);
		} else {
			int error;

			announce_buf[0] = '\0';

			/*
			 * Retry any UNIT ATTENTION type errors.  They
			 * are expected at boot.
			 */
			error = daerror(done_ccb, CAM_RETRY_SELTO,
					SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			} else if (error != 0) {
				struct scsi_sense_data *sense;
				int asc, ascq;
				int sense_key, error_code;
				int have_sense;
				cam_status status;
				struct ccb_getdev cgd;

				/* Don't wedge this device's queue */
				status = done_ccb->ccb_h.status;
				if ((status & CAM_DEV_QFRZN) != 0)
					cam_release_devq(done_ccb->ccb_h.path,
							 /*relsim_flags*/0,
							 /*reduction*/0,
							 /*timeout*/0,
							 /*getcount_only*/0);


				xpt_setup_ccb(&cgd.ccb_h,
					      done_ccb->ccb_h.path,
					      /* priority */ 1);
				cgd.ccb_h.func_code = XPT_GDEV_TYPE;
				xpt_action((union ccb *)&cgd);

				if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0)
				 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0)
				 || ((status & CAM_AUTOSNS_VALID) == 0))
					have_sense = FALSE;
				else
					have_sense = TRUE;

				if (have_sense) {
					sense = &csio->sense_data;
					scsi_extract_sense(sense, &error_code,
							   &sense_key,
							   &asc, &ascq);
				}
				/*
				 * Attach to anything that claims to be a
				 * direct access or optical disk device,
				 * as long as it doesn't return a "Logical
				 * unit not supported" (0x25) error.
				 */
				if ((have_sense) && (asc != 0x25)
				 && (error_code == SSD_CURRENT_ERROR)) {
					const char *sense_key_desc;
					const char *asc_desc;

					scsi_sense_desc(sense_key, asc, ascq,
							&cgd.inq_data,
							&sense_key_desc,
							&asc_desc);
					snprintf(announce_buf,
					    sizeof(announce_buf),
						"Attempt to query device "
						"size failed: %s, %s",
						sense_key_desc,
						asc_desc);
				} else {
					if (have_sense)
						scsi_sense_print(
							&done_ccb->csio);
					else {
						xpt_print_path(periph->path);
						printf("got CAM status %#x\n",
						       done_ccb->ccb_h.status);
					}

					xpt_print_path(periph->path);
					printf("fatal error, failed"
					       " to attach to device\n");

					/*
					 * Free up resources.
					 */
					cam_periph_invalidate(periph);
				}
			}
		}
		free(csio->data_ptr, M_TEMP);
		if (announce_buf[0] != '\0') {
			xpt_announce_periph(periph, announce_buf);
			/*
			 * Create our sysctl variables, now that we know
			 * we have successfully attached.
			 */
			taskqueue_enqueue(taskqueue_thread,&softc->sysctl_task);
		}
		softc->state = DA_STATE_NORMAL;
		/*
		 * Since our peripheral may be invalidated by an error
		 * above or an external event, we must release our CCB
		 * before releasing the probe lock on the peripheral.
		 * The peripheral will only go away once the last lock
		 * is removed, and we need it around for the CCB release
		 * operation.
		 */
		xpt_release_ccb(done_ccb);
		cam_periph_unlock(periph);
		return;
	}
	case DA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case DA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct da_softc	  *softc;
	struct cam_periph *periph;
	int error;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct da_softc *)periph->softc;

	/*
	 * Automatically detect devices that do not support
	 * READ(6)/WRITE(6) and upgrade to using 10 byte cdbs.
	 */
	error = 0;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INVALID) {
		error = cmd6workaround(ccb);
	} else if (((ccb->ccb_h.status & CAM_STATUS_MASK) ==
		   CAM_SCSI_STATUS_ERROR)
	 && (ccb->ccb_h.status & CAM_AUTOSNS_VALID)
	 && (ccb->csio.scsi_status == SCSI_STATUS_CHECK_COND)
	 && ((ccb->ccb_h.flags & CAM_SENSE_PHYS) == 0)
	 && ((ccb->ccb_h.flags & CAM_SENSE_PTR) == 0)) {
		int sense_key, error_code, asc, ascq;

		scsi_extract_sense(&ccb->csio.sense_data,
				   &error_code, &sense_key, &asc, &ascq);
		if (sense_key == SSD_KEY_ILLEGAL_REQUEST)
			error = cmd6workaround(ccb);
	}
	if (error == ERESTART)
		return (ERESTART);

	/*
	 * XXX
	 * Until we have a better way of doing pack validation,
	 * don't treat UAs as errors.
	 */
	sense_flags |= SF_RETRY_UA;
	return(cam_periph_error(ccb, cam_flags, sense_flags,
				&softc->saved_ccb));
}

static void
daprevent(struct cam_periph *periph, int action)
{
	struct	da_softc *softc;
	union	ccb *ccb;
	int	error;

	softc = (struct da_softc *)periph->softc;

	if (((action == PR_ALLOW)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) == 0)
	 || ((action == PR_PREVENT)
	  && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) {
		return;
	}

	ccb = cam_periph_getccb(periph, /*priority*/1);

	scsi_prevent(&ccb->csio,
		     /*retries*/1,
		     /*cbfcnp*/dadone,
		     MSG_SIMPLE_Q_TAG,
		     action,
		     SSD_FULL_SIZE,
		     5000);

	error = cam_periph_runccb(ccb, /*error_routine*/NULL, CAM_RETRY_SELTO,
				  SF_RETRY_UA, softc->disk->d_devstat);

	if (error == 0) {
		if (action == PR_ALLOW)
			softc->flags &= ~DA_FLAG_PACK_LOCKED;
		else
			softc->flags |= DA_FLAG_PACK_LOCKED;
	}

	xpt_release_ccb(ccb);
}

static int
dagetcapacity(struct cam_periph *periph)
{
	struct da_softc *softc;
	union ccb *ccb;
	struct scsi_read_capacity_data *rcap;
	struct scsi_read_capacity_data_long *rcaplong;
	uint32_t block_len;
	uint64_t maxsector;
	int error;

	softc = (struct da_softc *)periph->softc;
	block_len = 0;
	maxsector = 0;
	error = 0;

	/* Do a read capacity */
	rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcaplong),
							M_TEMP,
							M_WAITOK);

	ccb = cam_periph_getccb(periph, /*priority*/1);
	scsi_read_capacity(&ccb->csio,
			   /*retries*/4,
			   /*cbfcnp*/dadone,
			   MSG_SIMPLE_Q_TAG,
			   rcap,
			   SSD_FULL_SIZE,
			   /*timeout*/60000);
	ccb->ccb_h.ccb_bp = NULL;

	error = cam_periph_runccb(ccb, daerror,
				  /*cam_flags*/CAM_RETRY_SELTO,
				  /*sense_flags*/SF_RETRY_UA,
				  softc->disk->d_devstat);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);

	if (error == 0) {
		block_len = scsi_4btoul(rcap->length);
		maxsector = scsi_4btoul(rcap->addr);

		if (maxsector != 0xffffffff)
			goto done;
	} else
		goto done;

	rcaplong = (struct scsi_read_capacity_data_long *)rcap;

	scsi_read_capacity_16(&ccb->csio,
			      /*retries*/ 4,
			      /*cbfcnp*/ dadone,
			      /*tag_action*/ MSG_SIMPLE_Q_TAG,
			      /*lba*/ 0,
			      /*reladr*/ 0,
			      /*pmi*/ 0,
			      rcaplong,
			      /*sense_len*/ SSD_FULL_SIZE,
			      /*timeout*/ 60000);
	ccb->ccb_h.ccb_bp = NULL;

	error = cam_periph_runccb(ccb, daerror,
				  /*cam_flags*/CAM_RETRY_SELTO,
				  /*sense_flags*/SF_RETRY_UA,
				  softc->disk->d_devstat);

	if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
		cam_release_devq(ccb->ccb_h.path,
				 /*relsim_flags*/0,
				 /*reduction*/0,
				 /*timeout*/0,
				 /*getcount_only*/0);

	if (error == 0) {
		block_len = scsi_4btoul(rcaplong->length);
		maxsector = scsi_8btou64(rcaplong->addr);
	}

done:

	if (error == 0)
		dasetgeom(periph, block_len, maxsector);

	xpt_release_ccb(ccb);

	free(rcap, M_TEMP);

	return (error);
}

static void
dasetgeom(struct cam_periph *periph, uint32_t block_len, uint64_t maxsector)
{
	struct ccb_calc_geometry ccg;
	struct da_softc *softc;
	struct disk_params *dp;

	softc = (struct da_softc *)periph->softc;

	dp = &softc->params;
	dp->secsize = block_len;
	dp->sectors = maxsector + 1;
	/*
	 * Have the controller provide us with a geometry
	 * for this disk.  The only time the geometry
	 * matters is when we boot and the controller
	 * is the only one knowledgeable enough to come
	 * up with something that will make this a bootable
	 * device.
	 */
	xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1);
	ccg.ccb_h.func_code = XPT_CALC_GEOMETRY;
	ccg.block_size = dp->secsize;
	ccg.volume_size = dp->sectors;
	ccg.heads = 0;
	ccg.secs_per_track = 0;
	ccg.cylinders = 0;
	xpt_action((union ccb*)&ccg);
	dp->heads = ccg.heads;
	dp->secs_per_track = ccg.secs_per_track;
	dp->cylinders = ccg.cylinders;
}

static void
dasendorderedtag(void *arg)
{
	struct da_softc *softc;
	int s;

	for (softc = SLIST_FIRST(&softc_list);
	     softc != NULL;
	     softc = SLIST_NEXT(softc, links)) {
		s = splsoftcam();
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= DA_FLAG_NEED_OTAG;
		}
		if (softc->outstanding_cmds > 0)
			softc->flags &= ~DA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
		splx(s);
	}
	/* Queue us up again */
	timeout(dasendorderedtag, NULL,
		(da_default_timeout * hz) / DA_ORDEREDTAG_INTERVAL);
}

/*
 * Step through all DA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
dashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	TAILQ_FOREACH(periph, &dadriver.units, unit_links) {
		union ccb ccb;
		softc = (struct da_softc *)periph->softc;

		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & DA_FLAG_OPEN) == 0)
		 || (softc->quirks & DA_Q_NO_SYNC_CACHE))
			continue;

		xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1);

		ccb.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&ccb.csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0, /* whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       60 * 60 * 1000);

		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if (((ccb.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
			 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){
				int error_code, sense_key, asc, ascq;

				scsi_extract_sense(&ccb.csio.sense_data,
						   &error_code, &sense_key,
						   &asc, &ascq);

				if (sense_key != SSD_KEY_ILLEGAL_REQUEST)
					scsi_sense_print(&ccb.csio);
			} else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       ccb.ccb_h.status, ccb.csio.scsi_status);
			}
		}

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);

	}
}

#else /* !_KERNEL */

/*
 * XXX This is only left out of the kernel build to silence warnings.  If,
 * for some reason this function is used in the kernel, the ifdefs should
 * be moved so it is included both in the kernel and userland.
 */
void
scsi_format_unit(struct ccb_scsiio *csio, u_int32_t retries,
		 void (*cbfcnp)(struct cam_periph *, union ccb *),
		 u_int8_t tag_action, u_int8_t byte2, u_int16_t ileave,
		 u_int8_t *data_ptr, u_int32_t dxfer_len, u_int8_t sense_len,
		 u_int32_t timeout)
{
	struct scsi_format_unit *scsi_cmd;

	scsi_cmd = (struct scsi_format_unit *)&csio->cdb_io.cdb_bytes;
	scsi_cmd->opcode = FORMAT_UNIT;
	scsi_cmd->byte2 = byte2;
	scsi_ulto2b(ileave, scsi_cmd->interleave);

	cam_fill_csio(csio,
		      retries,
		      cbfcnp,
		      /*flags*/ (dxfer_len > 0) ? CAM_DIR_OUT : CAM_DIR_NONE,
		      tag_action,
		      data_ptr,
		      dxfer_len,
		      sense_len,
		      sizeof(*scsi_cmd),
		      timeout);
}

#endif /* _KERNEL */