1 /* 2 * Implementation of SCSI Direct Access Peripheral driver for CAM. 3 * 4 * Copyright (c) 1997 Justin T. Gibbs. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions, and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. The name of the author may not be used to endorse or promote products 14 * derived from this software without specific prior written permission. 15 * 16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR 20 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 
 *
 * $Id: scsi_da.c,v 1.18 1999/01/05 20:43:41 mjacob Exp $
 */

#include "opt_hw_wdog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/buf.h>
#include <sys/devicestat.h>
#include <sys/dkbad.h>
#include <sys/disklabel.h>
#include <sys/diskslice.h>
#include <sys/malloc.h>
#include <sys/conf.h>

#include <machine/cons.h>
#include <machine/md_var.h>

#include <vm/vm.h>
#include <vm/vm_prot.h>
#include <vm/pmap.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_extend.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>

#include <cam/scsi/scsi_message.h>

/*
 * Per-instance driver state: a unit starts in PROBE (issuing READ
 * CAPACITY from dastart()) and transitions to NORMAL once geometry
 * is known.
 */
typedef enum {
	DA_STATE_PROBE,
	DA_STATE_NORMAL
} da_state;

/* Flag bits kept in da_softc->flags. */
typedef enum {
	DA_FLAG_PACK_INVALID	= 0x001, /* media gone/changed; fail new I/O */
	DA_FLAG_NEW_PACK	= 0x002,
	DA_FLAG_PACK_LOCKED	= 0x004,
	DA_FLAG_PACK_REMOVABLE	= 0x008,
	DA_FLAG_TAGGED_QUEUING	= 0x010,
	DA_FLAG_NEED_OTAG	= 0x020, /* issue an ordered tag on next I/O */
	DA_FLAG_WENT_IDLE	= 0x040,
	DA_FLAG_RETRY_UA	= 0x080, /* retry expected unit attentions */
	DA_FLAG_OPEN		= 0x100
} da_flags;

/* Device-specific workarounds selected by inquiry-string match. */
typedef enum {
	DA_Q_NONE		= 0x00,
	DA_Q_NO_SYNC_CACHE	= 0x01, /* device rejects SYNCHRONIZE CACHE */
	DA_Q_NO_6_BYTE		= 0x02  /* device mishandles 6-byte R/W CDBs */
} da_quirks;

/*
 * CCB classification stored in the per-CCB private ccb_state field.
 * Low nibble is the type; DA_CCB_RETRY_UA is ORed in above the mask.
 */
typedef enum {
	DA_CCB_PROBE		= 0x01,
	DA_CCB_BUFFER_IO	= 0x02,
	DA_CCB_WAITING		= 0x03,
	DA_CCB_DUMP		= 0x04,
	DA_CCB_TYPE_MASK	= 0x0F,
	DA_CCB_RETRY_UA		= 0x10
} da_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

/* Geometry as derived from READ CAPACITY in dasetgeom(). */
struct disk_params {
	u_int8_t  heads;
	u_int16_t cylinders;
	u_int8_t  secs_per_track;
	u_int32_t secsize;	/* Number of bytes/sector */
	u_int32_t sectors;	/* total number sectors */
};

/* Per-unit soft state, hung off cam_periph->softc. */
struct da_softc {
	struct buf_queue_head buf_queue;	/* pending buf I/O, disksort order */
	struct devstat device_stats;
	SLIST_ENTRY(da_softc) links;		/* on softc_list */
	LIST_HEAD(, ccb_hdr) pending_ccbs;	/* CCBs issued to the device */
	da_state state;
	da_flags flags;
	da_quirks quirks;
	int minimum_cmd_size;			/* 6 or 10, per DA_Q_NO_6_BYTE */
	int ordered_tag_count;
	struct disk_params params;
	struct diskslices *dk_slices;	/* virtual drives */
	union ccb saved_ccb;
};

struct da_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	da_quirks quirks;
};

static struct da_quirk_entry da_quirk_table[] =
{
	{
		/*
		 * This particular Fujitsu drive doesn't like the
		 * synchronize cache command.
		 * Reported by: Tom Jackson <toj@gorilla.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "FUJITSU", "M2954*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE

	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Matthew Jacob <mjacob@feral.com>
		 * in NetBSD PR kern/6027, August 24, 1998.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2217*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * This drive doesn't like the synchronize cache command
		 * either.  Reported by: Hellmuth Michaelis (hm@kts.org)
		 * (PR 8882).
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "MICROP", "2112*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't like the synchronize cache command.
		 * Reported by: Blaz Zupan <blaz@gold.amis.net>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "NEC", "D3847*", "*"},
		/*quirks*/ DA_Q_NO_SYNC_CACHE
	},
	{
		/*
		 * Doesn't work correctly with 6 byte reads/writes.
		 * Returns illegal request, and points to byte 9 of the
		 * 6-byte CDB.
		 * Reported by: Adam McDougall <bsdx@spawnet.com>
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 4*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	},
	{
		/*
		 * See above.
		 */
		{T_DIRECT, SIP_MEDIA_FIXED, "QUANTUM", "VIKING 2*", "*"},
		/*quirks*/ DA_Q_NO_6_BYTE
	}
};

static	d_open_t	daopen;
static	d_read_t	daread;
static	d_write_t	dawrite;
static	d_close_t	daclose;
static	d_strategy_t	dastrategy;
static	d_ioctl_t	daioctl;
static	d_dump_t	dadump;
static	d_psize_t	dasize;
static	periph_init_t	dainit;
static	void		daasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	periph_ctor_t	daregister;
static	periph_dtor_t	dacleanup;
static	periph_start_t	dastart;
static	periph_oninv_t	daoninvalidate;
static	void		dadone(struct cam_periph *periph,
			       union ccb *done_ccb);
static	int		daerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		daprevent(struct cam_periph *periph, int action);
static	void		dasetgeom(struct cam_periph *periph,
				  struct scsi_read_capacity_data * rdcap);
static	timeout_t	dasendorderedtag;
static	void		dashutdown(int howto, void *arg);

#ifndef DA_DEFAULT_TIMEOUT
#define DA_DEFAULT_TIMEOUT 60	/* Timeout in seconds */
#endif

/*
 * DA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
220 */ 221 #ifndef DA_ORDEREDTAG_INTERVAL 222 #define DA_ORDEREDTAG_INTERVAL 4 223 #endif 224 225 static struct periph_driver dadriver = 226 { 227 dainit, "da", 228 TAILQ_HEAD_INITIALIZER(dadriver.units), /* generation */ 0 229 }; 230 231 DATA_SET(periphdriver_set, dadriver); 232 233 #define DA_CDEV_MAJOR 13 234 #define DA_BDEV_MAJOR 4 235 236 /* For 2.2-stable support */ 237 #ifndef D_DISK 238 #define D_DISK 0 239 #endif 240 241 static struct cdevsw da_cdevsw = 242 { 243 /*d_open*/ daopen, 244 /*d_close*/ daclose, 245 /*d_read*/ daread, 246 /*d_write*/ dawrite, 247 /*d_ioctl*/ daioctl, 248 /*d_stop*/ nostop, 249 /*d_reset*/ noreset, 250 /*d_devtotty*/ nodevtotty, 251 /*d_poll*/ seltrue, 252 /*d_mmap*/ nommap, 253 /*d_strategy*/ dastrategy, 254 /*d_name*/ "da", 255 /*d_spare*/ NULL, 256 /*d_maj*/ -1, 257 /*d_dump*/ dadump, 258 /*d_psize*/ dasize, 259 /*d_flags*/ D_DISK, 260 /*d_maxio*/ 0, 261 /*b_maj*/ -1 262 }; 263 264 static SLIST_HEAD(,da_softc) softc_list; 265 static struct extend_array *daperiphs; 266 267 static int 268 daopen(dev_t dev, int flags, int fmt, struct proc *p) 269 { 270 struct cam_periph *periph; 271 struct da_softc *softc; 272 struct disklabel label; 273 int unit; 274 int part; 275 int error; 276 int s; 277 278 unit = dkunit(dev); 279 part = dkpart(dev); 280 periph = cam_extend_get(daperiphs, unit); 281 if (periph == NULL) 282 return (ENXIO); 283 284 softc = (struct da_softc *)periph->softc; 285 286 CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, 287 ("daopen: dev=0x%x (unit %d , partition %d)\n", dev, 288 unit, part)); 289 290 if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) { 291 return (error); /* error code from tsleep */ 292 } 293 294 if ((softc->flags & DA_FLAG_OPEN) == 0) { 295 if (cam_periph_acquire(periph) != CAM_REQ_CMP) 296 return(ENXIO); 297 softc->flags |= DA_FLAG_OPEN; 298 } 299 300 s = splsoftcam(); 301 if ((softc->flags & DA_FLAG_PACK_INVALID) != 0) { 302 /* 303 * If any partition is open, although the disk has 304 * been 
invalidated, disallow further opens. 305 */ 306 if (dsisopen(softc->dk_slices)) { 307 splx(s); 308 cam_periph_unlock(periph); 309 return (ENXIO); 310 } 311 312 /* Invalidate our pack information. */ 313 dsgone(&softc->dk_slices); 314 softc->flags &= ~DA_FLAG_PACK_INVALID; 315 } 316 splx(s); 317 318 /* Do a read capacity */ 319 { 320 struct scsi_read_capacity_data *rcap; 321 union ccb *ccb; 322 323 rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap), 324 M_TEMP, 325 M_WAITOK); 326 327 ccb = cam_periph_getccb(periph, /*priority*/1); 328 scsi_read_capacity(&ccb->csio, 329 /*retries*/1, 330 /*cbfncp*/dadone, 331 MSG_SIMPLE_Q_TAG, 332 rcap, 333 SSD_FULL_SIZE, 334 /*timeout*/60000); 335 ccb->ccb_h.ccb_bp = NULL; 336 337 error = cam_periph_runccb(ccb, daerror, /*cam_flags*/0, 338 /*sense_flags*/SF_RETRY_UA, 339 &softc->device_stats); 340 341 xpt_release_ccb(ccb); 342 343 if (error == 0) { 344 dasetgeom(periph, rcap); 345 } 346 347 free(rcap, M_TEMP); 348 } 349 350 if (error == 0) { 351 struct ccb_getdev cgd; 352 353 /* Build label for whole disk. */ 354 bzero(&label, sizeof(label)); 355 label.d_type = DTYPE_SCSI; 356 357 /* 358 * Grab the inquiry data to get the vendor and product names. 359 * Put them in the typename and packname for the label. 
360 */ 361 xpt_setup_ccb(&cgd.ccb_h, periph->path, /*priority*/ 1); 362 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 363 xpt_action((union ccb *)&cgd); 364 365 strncpy(label.d_typename, cgd.inq_data.vendor, 366 min(SID_VENDOR_SIZE, sizeof(label.d_typename))); 367 strncpy(label.d_packname, cgd.inq_data.product, 368 min(SID_PRODUCT_SIZE, sizeof(label.d_packname))); 369 370 label.d_secsize = softc->params.secsize; 371 label.d_nsectors = softc->params.secs_per_track; 372 label.d_ntracks = softc->params.heads; 373 label.d_ncylinders = softc->params.cylinders; 374 label.d_secpercyl = softc->params.heads 375 * softc->params.secs_per_track; 376 label.d_secperunit = softc->params.sectors; 377 378 if ((dsisopen(softc->dk_slices) == 0) 379 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) { 380 daprevent(periph, PR_PREVENT); 381 } 382 383 /* Initialize slice tables. */ 384 error = dsopen("da", dev, fmt, 0, &softc->dk_slices, &label, 385 dastrategy, (ds_setgeom_t *)NULL, 386 &da_cdevsw); 387 388 /* 389 * Check to see whether or not the blocksize is set yet. 390 * If it isn't, set it and then clear the blocksize 391 * unavailable flag for the device statistics. 
392 */ 393 if ((softc->device_stats.flags & DEVSTAT_BS_UNAVAILABLE) != 0){ 394 softc->device_stats.block_size = softc->params.secsize; 395 softc->device_stats.flags &= ~DEVSTAT_BS_UNAVAILABLE; 396 } 397 } 398 399 if (error != 0) { 400 if ((dsisopen(softc->dk_slices) == 0) 401 && ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0)) { 402 daprevent(periph, PR_ALLOW); 403 } 404 } 405 cam_periph_unlock(periph); 406 return (error); 407 } 408 409 static int 410 daclose(dev_t dev, int flag, int fmt, struct proc *p) 411 { 412 struct cam_periph *periph; 413 struct da_softc *softc; 414 int unit; 415 int error; 416 417 unit = dkunit(dev); 418 periph = cam_extend_get(daperiphs, unit); 419 if (periph == NULL) 420 return (ENXIO); 421 422 softc = (struct da_softc *)periph->softc; 423 424 if ((error = cam_periph_lock(periph, PRIBIO)) != 0) { 425 return (error); /* error code from tsleep */ 426 } 427 428 dsclose(dev, fmt, softc->dk_slices); 429 if (dsisopen(softc->dk_slices)) { 430 cam_periph_unlock(periph); 431 return (0); 432 } 433 434 if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) { 435 union ccb *ccb; 436 437 ccb = cam_periph_getccb(periph, /*priority*/1); 438 439 scsi_synchronize_cache(&ccb->csio, 440 /*retries*/1, 441 /*cbfcnp*/dadone, 442 MSG_SIMPLE_Q_TAG, 443 /*begin_lba*/0,/* Cover the whole disk */ 444 /*lb_count*/0, 445 SSD_FULL_SIZE, 446 5 * 60 * 1000); 447 448 /* Ignore any errors */ 449 cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0, 450 /*sense_flags*/SF_RETRY_UA, 451 &softc->device_stats); 452 453 if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0) 454 cam_release_devq(ccb->ccb_h.path, 455 /*relsim_flags*/0, 456 /*reduction*/0, 457 /*timeout*/0, 458 /*getcount_only*/0); 459 460 xpt_release_ccb(ccb); 461 462 } 463 464 if ((softc->flags & DA_FLAG_PACK_REMOVABLE) != 0) { 465 daprevent(periph, PR_ALLOW); 466 /* 467 * If we've got removeable media, mark the blocksize as 468 * unavailable, since it could change when new media is 469 * inserted. 
470 */ 471 softc->device_stats.flags |= DEVSTAT_BS_UNAVAILABLE; 472 } 473 474 softc->flags &= ~DA_FLAG_OPEN; 475 cam_periph_unlock(periph); 476 cam_periph_release(periph); 477 return (0); 478 } 479 480 static int 481 daread(dev_t dev, struct uio *uio, int ioflag) 482 { 483 return(physio(dastrategy, NULL, dev, 1, minphys, uio)); 484 } 485 486 static int 487 dawrite(dev_t dev, struct uio *uio, int ioflag) 488 { 489 return(physio(dastrategy, NULL, dev, 0, minphys, uio)); 490 } 491 492 /* 493 * Actually translate the requested transfer into one the physical driver 494 * can understand. The transfer is described by a buf and will include 495 * only one physical transfer. 496 */ 497 static void 498 dastrategy(struct buf *bp) 499 { 500 struct cam_periph *periph; 501 struct da_softc *softc; 502 u_int unit; 503 u_int part; 504 int s; 505 506 unit = dkunit(bp->b_dev); 507 part = dkpart(bp->b_dev); 508 periph = cam_extend_get(daperiphs, unit); 509 if (periph == NULL) { 510 bp->b_error = ENXIO; 511 goto bad; 512 } 513 softc = (struct da_softc *)periph->softc; 514 #if 0 515 /* 516 * check it's not too big a transfer for our adapter 517 */ 518 scsi_minphys(bp,&sd_switch); 519 #endif 520 521 /* 522 * Do bounds checking, adjust transfer, set b_cylin and b_pbklno. 523 */ 524 if (dscheck(bp, softc->dk_slices) <= 0) 525 goto done; 526 527 /* 528 * Mask interrupts so that the pack cannot be invalidated until 529 * after we are in the queue. Otherwise, we might not properly 530 * clean up one of the buffers. 531 */ 532 s = splbio(); 533 534 /* 535 * If the device has been made invalid, error out 536 */ 537 if ((softc->flags & DA_FLAG_PACK_INVALID)) { 538 splx(s); 539 bp->b_error = ENXIO; 540 goto bad; 541 } 542 543 /* 544 * Place it in the queue of disk activities for this disk 545 */ 546 bufqdisksort(&softc->buf_queue, bp); 547 548 splx(s); 549 550 /* 551 * Schedule ourselves for performing the work. 
	 */
	xpt_schedule(periph, /* XXX priority */1);

	return;
bad:
	bp->b_flags |= B_ERROR;
done:

	/*
	 * Correctly set the buf to indicate a completed xfer
	 */
	bp->b_resid = bp->b_bcount;
	biodone(bp);
	return;
}

/* For 2.2-stable support */
#ifndef ENOIOCTL
#define ENOIOCTL -1
#endif

/*
 * ioctl entry point: hand the request to the slice code first; anything
 * it does not recognize falls through to the generic CAM periph ioctls.
 */
static int
daioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	int unit;
	int error;

	unit = dkunit(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE, ("daioctl\n"));

	if (cmd == DIOCSBAD)
		return (EINVAL);	/* XXX */

	if ((error = cam_periph_lock(periph, PRIBIO|PCATCH)) != 0) {
		return (error); /* error code from tsleep */
	}

	error = dsioctl("da", dev, cmd, addr, flag, &softc->dk_slices,
			dastrategy, (ds_setgeom_t *)NULL);

	if (error == ENOIOCTL)
		error = cam_periph_ioctl(periph, cmd, addr, daerror);

	cam_periph_unlock(periph);

	return (error);
}

/*
 * Crash-dump entry point: write all of physical memory to the dump
 * partition using polled (non-interrupt) CCBs, then flush the cache.
 */
static int
dadump(dev_t dev)
{
	struct cam_periph *periph;
	struct da_softc *softc;
	struct disklabel *lp;
	u_int unit;
	u_int part;
	long num;		/* number of sectors to write */
	long blkoff;
	long blknum;
	long blkcnt;
	vm_offset_t addr;
	static int dadoingadump = 0;
	struct ccb_scsiio csio;

	/* toss any characters present prior to dump */
	while (cncheckc() != -1)
		;

	unit = dkunit(dev);
	part = dkpart(dev);
	periph = cam_extend_get(daperiphs, unit);
	if (periph == NULL) {
		return (ENXIO);
	}
	softc = (struct da_softc *)periph->softc;

	if ((softc->flags & DA_FLAG_PACK_INVALID) != 0
	 || (softc->dk_slices == NULL)
	 || (lp = dsgetlabel(dev, softc->dk_slices)) == NULL)
		return (ENXIO);

	/* Size of memory to dump, in disk sectors. */
	/* XXX Fix up for non DEV_BSIZE sectors!!! */
	num = (u_long)Maxmem * PAGE_SIZE / softc->params.secsize;

	blkoff = lp->d_partitions[part].p_offset;
	blkoff += softc->dk_slices->dss_slices[dkslice(dev)].ds_offset;

	/* check transfer bounds against partition size */
	if ((dumplo < 0) || ((dumplo + num) > lp->d_partitions[part].p_size))
		return (EINVAL);

	/*
	 * NOTE(review): dadoingadump is set but never cleared, including
	 * on the EIO/EINTR early returns below, so any second dump attempt
	 * gets EFAULT.  Presumably acceptable since the system is going
	 * down -- confirm before relying on re-entry.
	 */
	if (dadoingadump != 0)
		return (EFAULT);

	dadoingadump = 1;

	blknum = dumplo + blkoff;
	blkcnt = PAGE_SIZE / softc->params.secsize;

	addr = 0;	/* starting address */

	while (num > 0) {

		/* Map the next page (or the zero page for holes) at CADDR1. */
		if (is_physical_memory(addr)) {
			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
				   trunc_page(addr), VM_PROT_READ, TRUE);
		} else {
			pmap_enter(kernel_pmap, (vm_offset_t)CADDR1,
				   trunc_page(0), VM_PROT_READ, TRUE);
		}

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_read_write(&csio,
				/*retries*/1,
				dadone,
				MSG_ORDERED_Q_TAG,
				/*read*/FALSE,
				/*byte2*/0,
				/*minimum_cmd_size*/ softc->minimum_cmd_size,
				blknum,
				blkcnt,
				/*data_ptr*/CADDR1,
				/*dxfer_len*/blkcnt * softc->params.secsize,
				/*sense_len*/SSD_FULL_SIZE,
				DA_DEFAULT_TIMEOUT * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
				scsi_sense_print(&csio);
			else
				printf("status == 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			return(EIO);
		}

		if (addr % (1024 * 1024) == 0) {
#ifdef	HW_WDOG
			if (wdog_tickler)
				(*wdog_tickler)();
#endif /* HW_WDOG */
			/* Count in MB of data left to write */
			printf("%ld ", (num * softc->params.secsize)
				     / (1024 * 1024));
		}

		/* update block count */
		num -= blkcnt;
		blknum += blkcnt;
		addr += blkcnt * softc->params.secsize;

		/* operator aborting dump? */
		if (cncheckc() != -1)
			return (EINTR);
	}

	/*
	 * Sync the disk cache contents to the physical media.
	 */
	if ((softc->quirks & DA_Q_NO_SYNC_CACHE) == 0) {

		xpt_setup_ccb(&csio.ccb_h, periph->path, /*priority*/1);
		csio.ccb_h.ccb_state = DA_CCB_DUMP;
		scsi_synchronize_cache(&csio,
				       /*retries*/1,
				       /*cbfcnp*/dadone,
				       MSG_SIMPLE_Q_TAG,
				       /*begin_lba*/0,/* Cover the whole disk */
				       /*lb_count*/0,
				       SSD_FULL_SIZE,
				       5 * 60 * 1000);
		xpt_polled_action((union ccb *)&csio);

		if ((csio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			if ((csio.ccb_h.status & CAM_STATUS_MASK) ==
			     CAM_SCSI_STATUS_ERROR)
				scsi_sense_print(&csio);
			else {
				xpt_print_path(periph->path);
				printf("Synchronize cache failed, status "
				       "== 0x%x, scsi status == 0x%x\n",
				       csio.ccb_h.status, csio.scsi_status);
			}
		}
	}
	return (0);
}

/* Return the size of the dump partition, via the slice code. */
static int
dasize(dev_t dev)
{
	struct cam_periph *periph;
	struct da_softc *softc;

	periph = cam_extend_get(daperiphs, dkunit(dev));
	if (periph == NULL)
		return (ENXIO);

	softc = (struct da_softc *)periph->softc;

	return (dssize(dev, &softc->dk_slices, daopen, daclose));
}

/*
 * Driver bring-up: allocate the unit lookup array and register an async
 * callback so we hear about newly found devices.
 */
static void
dainit(void)
{
	cam_status status;
	struct cam_path *path;

	/*
	 * Create our extend array for storing the devices we attach to.
	 */
	daperiphs = cam_extend_new();
	SLIST_INIT(&softc_list);
	if (daperiphs == NULL) {
		printf("da: Failed to alloc extend array!\n");
		return;
	}

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_create_path(&path, /*periph*/NULL, CAM_XPT_PATH_ID,
				 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD);

	if (status == CAM_REQ_CMP) {
		struct ccb_setasync csa;

		xpt_setup_ccb(&csa.ccb_h, path, /*priority*/5);
		csa.ccb_h.func_code = XPT_SASYNC_CB;
		csa.event_enable = AC_FOUND_DEVICE;
		csa.callback = daasync;
		csa.callback_arg = NULL;
		xpt_action((union ccb *)&csa);
		status = csa.ccb_h.status;
		xpt_free_path(path);
	}

	if (status != CAM_REQ_CMP) {
		printf("da: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else {
		int err;

		/* If we were successful, register our devsw */
		cdevsw_add_generic(DA_BDEV_MAJOR, DA_CDEV_MAJOR, &da_cdevsw);

		/*
		 * Schedule a periodic event to occasionally send an
		 * ordered tag to a device.
		 */
		timeout(dasendorderedtag, NULL,
			(DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL);

		if ((err = at_shutdown(dashutdown, NULL,
				       SHUTDOWN_POST_SYNC)) != 0)
			printf("dainit: at_shutdown returned %d!\n", err);
	}
}

/*
 * Called when the device goes away: deregister callbacks, fail all
 * queued I/O and unhook the softc from the global list.  The softc
 * itself is freed later in dacleanup().
 */
static void
daoninvalidate(struct cam_periph *periph)
{
	int s;
	struct da_softc *softc;
	struct buf *q_bp;
	struct ccb_setasync csa;

	softc = (struct da_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path,
		      /* priority */ 5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = 0;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	softc->flags |= DA_FLAG_PACK_INVALID;

	/*
	 * Although the oninvalidate() routines are always called at
	 * splsoftcam, we need to be at splbio() here to keep the buffer
	 * queue from being modified while we traverse it.
	 */
	s = splbio();

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	while ((q_bp = bufq_first(&softc->buf_queue)) != NULL){
		bufq_remove(&softc->buf_queue, q_bp);
		q_bp->b_resid = q_bp->b_bcount;
		q_bp->b_error = ENXIO;
		q_bp->b_flags |= B_ERROR;
		biodone(q_bp);
	}
	splx(s);

	SLIST_REMOVE(&softc_list, softc, da_softc, links);

	xpt_print_path(periph->path);
	printf("lost device\n");
}

/* Final teardown: release statistics, the unit slot and the softc. */
static void
dacleanup(struct cam_periph *periph)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;

	devstat_remove_entry(&softc->device_stats);
	cam_extend_release(daperiphs, periph->unit_number);
	xpt_print_path(periph->path);
	printf("removing device entry\n");
	free(softc, M_DEVBUF);
}

/*
 * Async event dispatcher: attach to new direct-access/optical devices,
 * invalidate lost ones, and arm unit-attention retries after resets.
 */
static void
daasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;

		/* Only direct-access and optical devices belong to "da". */
		if ((cgd->pd_type != T_DIRECT) && (cgd->pd_type != T_OPTICAL))
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(daregister, daoninvalidate,
					  dacleanup, dastart,
					  "da", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, daasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("daasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	case AC_LOST_DEVICE:
		cam_periph_invalidate(periph);
		break;
	case AC_SENT_BDR:
	case AC_BUS_RESET:
	{
		struct da_softc *softc;
		struct ccb_hdr *ccbh;
		int s;

		softc = (struct da_softc *)periph->softc;
		s = splsoftcam();
		/*
		 * Don't fail on the expected unit attention
		 * that will occur.
		 */
		softc->flags |= DA_FLAG_RETRY_UA;
		for (ccbh = LIST_FIRST(&softc->pending_ccbs);
		     ccbh != NULL; ccbh = LIST_NEXT(ccbh, periph_links.le))
			ccbh->ccb_state |= DA_CCB_RETRY_UA;
		splx(s);
		break;
	}
	case AC_TRANSFER_NEG:
	case AC_SCSI_AEN:
	case AC_UNSOL_RESEL:
	default:
		break;
	}
}

/*
 * Peripheral constructor: allocate and initialize the per-unit softc,
 * apply inquiry-based quirks, and kick off the capacity probe.
 */
static cam_status
daregister(struct cam_periph *periph, void *arg)
{
	int s;
	struct da_softc *softc;
	struct ccb_setasync csa;
	struct ccb_getdev *cgd;
	caddr_t match;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("daregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("daregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct da_softc *)malloc(sizeof(*softc),M_DEVBUF,M_NOWAIT);

	if (softc == NULL) {
		printf("daregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bzero(softc, sizeof(*softc));
	LIST_INIT(&softc->pending_ccbs);
	softc->state = DA_STATE_PROBE;
	bufq_init(&softc->buf_queue);
	if (SID_IS_REMOVABLE(&cgd->inq_data))
		softc->flags |= DA_FLAG_PACK_REMOVABLE;
	if ((cgd->inq_data.flags & SID_CmdQue) != 0)
		softc->flags |= DA_FLAG_TAGGED_QUEUING;

	periph->softc = softc;

	cam_extend_set(daperiphs, periph->unit_number, periph);

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->inq_data,
			       (caddr_t)da_quirk_table,
			       sizeof(da_quirk_table)/sizeof(*da_quirk_table),
			       sizeof(*da_quirk_table), scsi_inquiry_match);

	if (match != NULL)
		softc->quirks = ((struct da_quirk_entry *)match)->quirks;
	else
		softc->quirks = DA_Q_NONE;

	/* Quirked devices get 10-byte READ/WRITE CDBs as the minimum. */
	if (softc->quirks & DA_Q_NO_6_BYTE)
		softc->minimum_cmd_size = 10;
	else
		softc->minimum_cmd_size = 6;

	/*
	 * Block our timeout handler while we
	 * add this softc to the dev list.
	 */
	s = splsoftclock();
	SLIST_INSERT_HEAD(&softc_list, softc, links);
	splx(s);

	/*
	 * The DA driver supports a blocksize, but
	 * we don't know the blocksize until we do
	 * a read capacity.  So, set a flag to
	 * indicate that the blocksize is
	 * unavailable right now.  We'll clear the
	 * flag as soon as we've done a read capacity.
	 */
	devstat_add_entry(&softc->device_stats, "da",
			  periph->unit_number, 0,
			  DEVSTAT_BS_UNAVAILABLE,
			  cgd->pd_type | DEVSTAT_TYPE_IF_SCSI);

	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_setup_ccb(&csa.ccb_h, periph->path, /*priority*/5);
	csa.ccb_h.func_code = XPT_SASYNC_CB;
	csa.event_enable = AC_SENT_BDR | AC_BUS_RESET | AC_LOST_DEVICE;
	csa.callback = daasync;
	csa.callback_arg = periph;
	xpt_action((union ccb *)&csa);

	/*
	 * Lock this peripheral until we are setup.
	 * This first call can't block
	 */
	(void)cam_periph_lock(periph, PRIBIO);
	xpt_schedule(periph, /*priority*/5);

	return(CAM_REQ_CMP);
}

/*
 * CCB-available callback: in NORMAL state convert the next queued buf
 * into a READ/WRITE CCB (or hand the CCB to a waiter); in PROBE state
 * issue the initial READ CAPACITY.
 */
static void
dastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct da_softc *softc;

	softc = (struct da_softc *)periph->softc;


	switch (softc->state) {
	case DA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */
		struct buf *bp;
		int s;

		/*
		 * See if there is a buf with work for us to do..
		 */
		s = splbio();
		bp = bufq_first(&softc->buf_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = DA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			splx(s);
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			splx(s);
			xpt_release_ccb(start_ccb);
		} else {
			int oldspl;
			u_int8_t tag_code;

			bufq_remove(&softc->buf_queue, bp);

			devstat_start_transaction(&softc->device_stats);

			/*
			 * Periodically escalate to an ordered tag to
			 * prevent simple-tag starvation (see
			 * DA_ORDEREDTAG_INTERVAL above).
			 */
			if ((bp->b_flags & B_ORDERED) != 0
			 || (softc->flags & DA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~DA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = MSG_ORDERED_Q_TAG;
			} else {
				tag_code = MSG_SIMPLE_Q_TAG;
			}
			scsi_read_write(&start_ccb->csio,
					/*retries*/4,
					dadone,
					tag_code,
					bp->b_flags & B_READ,
					/*byte2*/0,
					softc->minimum_cmd_size,
					bp->b_pblkno,
					bp->b_bcount / softc->params.secsize,
					bp->b_data,
					bp->b_bcount,
					/*sense_len*/SSD_FULL_SIZE,
					DA_DEFAULT_TIMEOUT * 1000);
			start_ccb->ccb_h.ccb_state = DA_CCB_BUFFER_IO;

			/*
			 * Block out any asynchronous callbacks
			 * while we touch the pending ccb list.
			 */
			oldspl = splcam();
			LIST_INSERT_HEAD(&softc->pending_ccbs,
					 &start_ccb->ccb_h, periph_links.le);
			splx(oldspl);

			/* We expect a unit attention from this device */
			if ((softc->flags & DA_FLAG_RETRY_UA) != 0) {
				start_ccb->ccb_h.ccb_state |= DA_CCB_RETRY_UA;
				softc->flags &= ~DA_FLAG_RETRY_UA;
			}

			start_ccb->ccb_h.ccb_bp = bp;
			bp = bufq_first(&softc->buf_queue);
			splx(s);

			xpt_action(start_ccb);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, /* XXX priority */1);
		}
		break;
	}
	case DA_STATE_PROBE:
	{
		struct ccb_scsiio *csio;
		struct scsi_read_capacity_data *rcap;

		rcap = (struct scsi_read_capacity_data *)malloc(sizeof(*rcap),
								M_TEMP,
								M_NOWAIT);
		if (rcap == NULL) {
			printf("dastart: Couldn't malloc read_capacity data\n");
			/* da_free_periph??? */
			break;
		}
		csio = &start_ccb->csio;
		scsi_read_capacity(csio,
				   /*retries*/4,
				   dadone,
				   MSG_SIMPLE_Q_TAG,
				   rcap,
				   SSD_FULL_SIZE,
				   /*timeout*/5000);
		start_ccb->ccb_h.ccb_bp = NULL;
		start_ccb->ccb_h.ccb_state = DA_CCB_PROBE;
		xpt_action(start_ccb);
		break;
	}
	}
}


/*
 * Command completion handler, dispatched on the CCB's ccb_state type:
 * finish buffer I/O (handling errors, pack invalidation and devstat
 * accounting) or complete the capacity probe.
 */
static void
dadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct da_softc *softc;
	struct ccb_scsiio *csio;

	softc = (struct da_softc *)periph->softc;
	csio = &done_ccb->csio;
	switch (csio->ccb_h.ccb_state & DA_CCB_TYPE_MASK) {
	case DA_CCB_BUFFER_IO:
	{
		struct buf *bp;
		int oldspl;

		bp = (struct buf *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;
			int s;
			int sf;

			if ((csio->ccb_h.ccb_state & DA_CCB_RETRY_UA) != 0)
				sf = SF_RETRY_UA;
			else
				sf = 0;

			if ((error = daerror(done_ccb, 0, sf)) == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
				 */
				return;
			}
			if (error != 0) {
				struct buf *q_bp;

				s = splbio();

				if (error == ENXIO) {
					/*
					 * Catastrophic error.  Mark our pack as
					 * invalid.
					 */
					/* XXX See if this is really a media
					 *     change first.
					 */
					xpt_print_path(periph->path);
					printf("Invalidating pack\n");
					softc->flags |= DA_FLAG_PACK_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				while ((q_bp = bufq_first(&softc->buf_queue))
					!= NULL) {
					bufq_remove(&softc->buf_queue, q_bp);
					q_bp->b_resid = q_bp->b_bcount;
					q_bp->b_error = EIO;
					q_bp->b_flags |= B_ERROR;
					biodone(q_bp);
				}
				splx(s);
				bp->b_error = error;
				bp->b_resid = bp->b_bcount;
				bp->b_flags |= B_ERROR;
			} else {
				bp->b_resid = csio->resid;
				bp->b_error = 0;
				if (bp->b_resid != 0) {
					/* Short transfer ??? */
					bp->b_flags |= B_ERROR;
				}
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			bp->b_resid = csio->resid;
			if (csio->resid > 0)
				bp->b_flags |= B_ERROR;
		}

		/*
		 * Block out any asynchronous callbacks
		 * while we touch the pending ccb list.
		 */
		oldspl = splcam();
		LIST_REMOVE(&done_ccb->ccb_h, periph_links.le);
		splx(oldspl);

		devstat_end_transaction(&softc->device_stats,
					bp->b_bcount - bp->b_resid,
					done_ccb->csio.tag_action & 0xf,
					(bp->b_flags & B_READ) ? DEVSTAT_READ
							       : DEVSTAT_WRITE);

		if (softc->device_stats.busy_count == 0)
			softc->flags |= DA_FLAG_WENT_IDLE;

		biodone(bp);
		break;
	}
	case DA_CCB_PROBE:
	{
		struct scsi_read_capacity_data *rdcap;
		char announce_buf[80];

		rdcap = (struct scsi_read_capacity_data *)csio->data_ptr;

		if ((csio->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
			struct disk_params *dp;

			dasetgeom(periph, rdcap);
			dp = &softc->params;
			snprintf(announce_buf, sizeof(announce_buf),
			        "%ldMB (%d %d byte sectors: %dH %dS/T %dC)",
				dp->sectors / ((1024L * 1024L) / dp->secsize),
				dp->sectors, dp->secsize, dp->heads,
				dp->secs_per_track, dp->cylinders);
		} else {
			int error;

			/*
			 * Retry any UNIT ATTENTION type errors.  They
			 * are expected at boot.
			 */
			error = daerror(done_ccb, 0, SF_RETRY_UA|SF_NO_PRINT);
			if (error == ERESTART) {
				/*
				 * A retry was scheduled, so
				 * just return.
1316 */ 1317 return; 1318 } else if (error != 0) { 1319 struct scsi_sense_data *sense; 1320 int asc, ascq; 1321 int sense_key, error_code; 1322 int have_sense; 1323 cam_status status; 1324 struct ccb_getdev cgd; 1325 1326 /* Don't wedge this device's queue */ 1327 cam_release_devq(done_ccb->ccb_h.path, 1328 /*relsim_flags*/0, 1329 /*reduction*/0, 1330 /*timeout*/0, 1331 /*getcount_only*/0); 1332 1333 status = done_ccb->ccb_h.status; 1334 1335 xpt_setup_ccb(&cgd.ccb_h, 1336 done_ccb->ccb_h.path, 1337 /* priority */ 1); 1338 cgd.ccb_h.func_code = XPT_GDEV_TYPE; 1339 xpt_action((union ccb *)&cgd); 1340 1341 if (((csio->ccb_h.flags & CAM_SENSE_PHYS) != 0) 1342 || ((csio->ccb_h.flags & CAM_SENSE_PTR) != 0) 1343 || ((status & CAM_AUTOSNS_VALID) == 0)) 1344 have_sense = FALSE; 1345 else 1346 have_sense = TRUE; 1347 1348 if (have_sense) { 1349 sense = &csio->sense_data; 1350 scsi_extract_sense(sense, &error_code, 1351 &sense_key, 1352 &asc, &ascq); 1353 } 1354 /* 1355 * With removable media devices, we expect 1356 * 0x3a (Medium not present) errors, since not 1357 * everyone leaves a disk in the drive. If 1358 * the error is anything else, though, we 1359 * shouldn't attach. 1360 */ 1361 if ((have_sense) && (asc == 0x3a) 1362 && (error_code == SSD_CURRENT_ERROR)) 1363 snprintf(announce_buf, 1364 sizeof(announce_buf), 1365 "Attempt to query device " 1366 "size failed: %s, %s", 1367 scsi_sense_key_text[sense_key], 1368 scsi_sense_desc(asc,ascq, 1369 &cgd.inq_data)); 1370 else { 1371 if (have_sense) 1372 scsi_sense_print( 1373 &done_ccb->csio); 1374 else { 1375 xpt_print_path(periph->path); 1376 printf("got CAM status %#x\n", 1377 done_ccb->ccb_h.status); 1378 } 1379 1380 xpt_print_path(periph->path); 1381 printf("fatal error, failed" 1382 " to attach to device\n"); 1383 1384 /* 1385 * Free up resources. 
1386 */ 1387 cam_periph_invalidate(periph); 1388 announce_buf[0] = '\0'; 1389 } 1390 } 1391 } 1392 free(rdcap, M_TEMP); 1393 if (announce_buf[0] != '\0') 1394 xpt_announce_periph(periph, announce_buf); 1395 softc->state = DA_STATE_NORMAL; 1396 /* 1397 * Since our peripheral may be invalidated by an error 1398 * above or an external event, we must release our CCB 1399 * before releasing the probe lock on the peripheral. 1400 * The peripheral will only go away once the last lock 1401 * is removed, and we need it around for the CCB release 1402 * operation. 1403 */ 1404 xpt_release_ccb(done_ccb); 1405 cam_periph_unlock(periph); 1406 return; 1407 } 1408 case DA_CCB_WAITING: 1409 { 1410 /* Caller will release the CCB */ 1411 wakeup(&done_ccb->ccb_h.cbfcnp); 1412 return; 1413 } 1414 case DA_CCB_DUMP: 1415 /* No-op. We're polling */ 1416 return; 1417 default: 1418 break; 1419 } 1420 xpt_release_ccb(done_ccb); 1421 } 1422 1423 static int 1424 daerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags) 1425 { 1426 struct da_softc *softc; 1427 struct cam_periph *periph; 1428 1429 periph = xpt_path_periph(ccb->ccb_h.path); 1430 softc = (struct da_softc *)periph->softc; 1431 1432 /* 1433 * XXX 1434 * Until we have a better way of doing pack validation, 1435 * don't treat UAs as errors. 
1436 */ 1437 sense_flags |= SF_RETRY_UA; 1438 return(cam_periph_error(ccb, cam_flags, sense_flags, 1439 &softc->saved_ccb)); 1440 } 1441 1442 static void 1443 daprevent(struct cam_periph *periph, int action) 1444 { 1445 struct da_softc *softc; 1446 union ccb *ccb; 1447 int error; 1448 1449 softc = (struct da_softc *)periph->softc; 1450 1451 if (((action == PR_ALLOW) 1452 && (softc->flags & DA_FLAG_PACK_LOCKED) == 0) 1453 || ((action == PR_PREVENT) 1454 && (softc->flags & DA_FLAG_PACK_LOCKED) != 0)) { 1455 return; 1456 } 1457 1458 ccb = cam_periph_getccb(periph, /*priority*/1); 1459 1460 scsi_prevent(&ccb->csio, 1461 /*retries*/1, 1462 /*cbcfp*/dadone, 1463 MSG_SIMPLE_Q_TAG, 1464 action, 1465 SSD_FULL_SIZE, 1466 5000); 1467 1468 error = cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0, 1469 /*sense_flags*/0, &softc->device_stats); 1470 1471 if (error == 0) { 1472 if (action == PR_ALLOW) 1473 softc->flags &= ~DA_FLAG_PACK_LOCKED; 1474 else 1475 softc->flags |= DA_FLAG_PACK_LOCKED; 1476 } 1477 1478 xpt_release_ccb(ccb); 1479 } 1480 1481 static void 1482 dasetgeom(struct cam_periph *periph, struct scsi_read_capacity_data * rdcap) 1483 { 1484 struct ccb_calc_geometry ccg; 1485 struct da_softc *softc; 1486 struct disk_params *dp; 1487 1488 softc = (struct da_softc *)periph->softc; 1489 1490 dp = &softc->params; 1491 dp->secsize = scsi_4btoul(rdcap->length); 1492 dp->sectors = scsi_4btoul(rdcap->addr) + 1; 1493 /* 1494 * Have the controller provide us with a geometry 1495 * for this disk. The only time the geometry 1496 * matters is when we boot and the controller 1497 * is the only one knowledgeable enough to come 1498 * up with something that will make this a bootable 1499 * device. 
1500 */ 1501 xpt_setup_ccb(&ccg.ccb_h, periph->path, /*priority*/1); 1502 ccg.ccb_h.func_code = XPT_CALC_GEOMETRY; 1503 ccg.block_size = dp->secsize; 1504 ccg.volume_size = dp->sectors; 1505 ccg.heads = 0; 1506 ccg.secs_per_track = 0; 1507 ccg.cylinders = 0; 1508 xpt_action((union ccb*)&ccg); 1509 dp->heads = ccg.heads; 1510 dp->secs_per_track = ccg.secs_per_track; 1511 dp->cylinders = ccg.cylinders; 1512 } 1513 1514 static void 1515 dasendorderedtag(void *arg) 1516 { 1517 struct da_softc *softc; 1518 int s; 1519 1520 for (softc = SLIST_FIRST(&softc_list); 1521 softc != NULL; 1522 softc = SLIST_NEXT(softc, links)) { 1523 s = splsoftcam(); 1524 if ((softc->ordered_tag_count == 0) 1525 && ((softc->flags & DA_FLAG_WENT_IDLE) == 0)) { 1526 softc->flags |= DA_FLAG_NEED_OTAG; 1527 } 1528 if (softc->device_stats.busy_count > 0) 1529 softc->flags &= ~DA_FLAG_WENT_IDLE; 1530 1531 softc->ordered_tag_count = 0; 1532 splx(s); 1533 } 1534 /* Queue us up again */ 1535 timeout(dasendorderedtag, NULL, 1536 (DA_DEFAULT_TIMEOUT * hz) / DA_ORDEREDTAG_INTERVAL); 1537 } 1538 1539 /* 1540 * Step through all DA peripheral drivers, and if the device is still open, 1541 * sync the disk cache to physical media. 1542 */ 1543 static void 1544 dashutdown(int howto, void *arg) 1545 { 1546 struct cam_periph *periph; 1547 struct da_softc *softc; 1548 1549 for (periph = TAILQ_FIRST(&dadriver.units); periph != NULL; 1550 periph = TAILQ_NEXT(periph, unit_links)) { 1551 union ccb ccb; 1552 softc = (struct da_softc *)periph->softc; 1553 1554 /* 1555 * We only sync the cache if the drive is still open, and 1556 * if the drive is capable of it.. 
1557 */ 1558 if (((softc->flags & DA_FLAG_OPEN) == 0) 1559 || (softc->quirks & DA_Q_NO_SYNC_CACHE)) 1560 continue; 1561 1562 xpt_setup_ccb(&ccb.ccb_h, periph->path, /*priority*/1); 1563 1564 ccb.ccb_h.ccb_state = DA_CCB_DUMP; 1565 scsi_synchronize_cache(&ccb.csio, 1566 /*retries*/1, 1567 /*cbfcnp*/dadone, 1568 MSG_SIMPLE_Q_TAG, 1569 /*begin_lba*/0, /* whole disk */ 1570 /*lb_count*/0, 1571 SSD_FULL_SIZE, 1572 5 * 60 * 1000); 1573 1574 xpt_polled_action(&ccb); 1575 1576 if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) { 1577 if (((ccb.ccb_h.status & CAM_STATUS_MASK) == 1578 CAM_SCSI_STATUS_ERROR) 1579 && (ccb.csio.scsi_status == SCSI_STATUS_CHECK_COND)){ 1580 int error_code, sense_key, asc, ascq; 1581 1582 scsi_extract_sense(&ccb.csio.sense_data, 1583 &error_code, &sense_key, 1584 &asc, &ascq); 1585 1586 if (sense_key != SSD_KEY_ILLEGAL_REQUEST) 1587 scsi_sense_print(&ccb.csio); 1588 } else { 1589 xpt_print_path(periph->path); 1590 printf("Synchronize cache failed, status " 1591 "== 0x%x, scsi status == 0x%x\n", 1592 ccb.ccb_h.status, ccb.csio.scsi_status); 1593 } 1594 } 1595 1596 if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0) 1597 cam_release_devq(ccb.ccb_h.path, 1598 /*relsim_flags*/0, 1599 /*reduction*/0, 1600 /*timeout*/0, 1601 /*getcount_only*/0); 1602 1603 } 1604 } 1605