/*-
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>

#include <cam/ata/ata_all.h>

#ifdef _KERNEL

#define	ATA_MAX_28BIT_LBA	268435455UL

typedef enum {
	ADA_STATE_NORMAL
} ada_state;

typedef enum {
	ADA_FLAG_PACK_INVALID	= 0x001,
	ADA_FLAG_CAN_48BIT	= 0x002,
	ADA_FLAG_CAN_FLUSHCACHE	= 0x004,
	ADA_FLAG_CAN_NCQ	= 0x008,
	ADA_FLAG_CAN_DMA	= 0x010,
	ADA_FLAG_NEED_OTAG	= 0x020,
	ADA_FLAG_WENT_IDLE	= 0x040,
	ADA_FLAG_CAN_TRIM	= 0x080,
	ADA_FLAG_OPEN		= 0x100,
	ADA_FLAG_SCTX_INIT	= 0x200,
	ADA_FLAG_CAN_CFA	= 0x400
} ada_flags;

typedef enum {
	ADA_Q_NONE	= 0x00
} ada_quirks;

typedef enum {
	ADA_CCB_BUFFER_IO	= 0x03,
	ADA_CCB_WAITING		= 0x04,
	ADA_CCB_DUMP		= 0x05,
	ADA_CCB_TRIM		= 0x06,
	ADA_CCB_TYPE_MASK	= 0x0F,
} ada_ccb_state;

/* Offsets into our private area for storing information */
#define	ccb_state	ppriv_field0
#define	ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int8_t  secs_per_track;
	u_int32_t cylinders;
	u_int32_t secsize;	/* Number of bytes/logical sector */
	u_int64_t sectors;	/* Total number sectors */
};

#define	TRIM_MAX_BLOCKS	4
#define	TRIM_MAX_RANGES	TRIM_MAX_BLOCKS * 64
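/*
 * A single DATA SET MANAGEMENT (TRIM) command carries up to TRIM_MAX_BLOCKS
 * 512-byte payload blocks, each holding 64 range entries of 8 bytes (a
 * 48-bit starting LBA plus a 16-bit sector count), so one request can
 * describe up to TRIM_MAX_RANGES ranges.  The bios folded into a request
 * are remembered so they can all be completed when the command finishes.
 */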
struct trim_request {
	uint8_t		data[TRIM_MAX_RANGES * 8];
	struct bio	*bps[TRIM_MAX_RANGES];
};

struct ada_softc {
	struct bio_queue_head bio_queue;
	struct bio_queue_head trim_queue;
	ada_state state;
	ada_flags flags;
	ada_quirks quirks;
	int	ordered_tag_count;
	int	outstanding_cmds;
	int	trim_max_ranges;
	int	trim_running;
	struct disk_params params;
	struct disk *disk;
	union ccb saved_ccb;
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c;
	struct trim_request	trim_req;
};

struct ada_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	ada_quirks quirks;
};

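/*
 * Only the wildcard default entry is present so far; device-specific
 * entries would be matched against the IDENTIFY data by cam_quirkmatch()
 * in adaregister().
 */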
static struct ada_quirk_entry ada_quirk_table[] =
{
	{
		/* Default */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0
	},
};

static	disk_strategy_t	adastrategy;
static	dumper_t	adadump;
static	periph_init_t	adainit;
static	void		adaasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		adasysctlinit(void *context, int pending);
static	periph_ctor_t	adaregister;
static	periph_dtor_t	adacleanup;
static	periph_start_t	adastart;
static	periph_oninv_t	adaoninvalidate;
static	void		adadone(struct cam_periph *periph,
				union ccb *done_ccb);
static	int		adaerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		adagetparams(struct cam_periph *periph,
				struct ccb_getdev *cgd);
static	timeout_t	adasendorderedtag;
static	void		adashutdown(void *arg, int howto);

#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
#endif

#ifndef	ADA_DEFAULT_RETRY
#define	ADA_DEFAULT_RETRY	4
#endif

#ifndef	ADA_DEFAULT_SEND_ORDERED
#define	ADA_DEFAULT_SEND_ORDERED	1
#endif


static int ada_retry_count = ADA_DEFAULT_RETRY;
static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;

SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
           &ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
           &ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);

/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * passes the "don't send an ordered tag" test, so it takes us two
 * intervals to determine that a tag must be sent.
 */
#ifndef ADA_ORDEREDTAG_INTERVAL
#define ADA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver adadriver =
{
	adainit, "ada",
	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(ada, adadriver);

MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");

static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int unit;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	unit = periph->unit_number;
	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/* Invalidate our pack information. */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}

static int
adaclose(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	/* We only sync the cache if the drive is capable of it. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		cam_fill_ataio(&ccb->ataio,
		    1,
		    adadone,
		    CAM_DIR_NONE,
		    0,
		    NULL,
		    0,
		    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
		    /*sense_flags*/0, softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
			    /*relsim_flags*/0,
			    /*reduction*/0,
			    /*timeout*/0,
			    /*getcount_only*/0);
		xpt_release_ccb(ccb);
	}

	softc->flags &= ~ADA_FLAG_OPEN;
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);
}

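/*
 * Ask the transport layer to schedule us when there is buffered or TRIM
 * work queued; a new TRIM is only requested once the previous one has
 * completed.
 */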
static void
adaschedule(struct cam_periph *periph)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;

	if (bioq_first(&softc->bio_queue) ||
	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
		/* Have more work to do, so ensure we stay scheduled */
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	}
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct ada_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk
	 */
	if (bp->bio_cmd == BIO_DELETE &&
	    (softc->flags & ADA_FLAG_CAN_TRIM))
		bioq_disksort(&softc->trim_queue, bp);
	else
		bioq_disksort(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	adaschedule(periph);
	cam_periph_unlock(periph);

	return;
}

static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	u_int secsize;
	union ccb ccb;
	struct disk *dp;
	uint64_t lba;
	uint16_t count;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct ada_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;
	lba = offset / secsize;
	count = length / secsize;

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    0,
		    adadone,
		    CAM_DIR_OUT,
		    0,
		    (u_int8_t *) virtual,
		    length,
		    ada_default_timeout*1000);
		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
		    (lba + count >= ATA_MAX_28BIT_LBA ||
		    count >= 256)) {
			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
			    0, lba, count);
		} else {
			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
			    0, lba, count);
		}
		xpt_polled_action(&ccb);

		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			cam_periph_unlock(periph);
			return(EIO);
		}
		cam_periph_unlock(periph);
		return(0);
	}

	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    1,
		    adadone,
		    CAM_DIR_NONE,
		    0,
		    NULL,
		    0,
		    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
			    /*relsim_flags*/0,
			    /*reduction*/0,
			    /*timeout*/0,
			    /*getcount_only*/0);
	}
	cam_periph_unlock(periph);
	return (0);
}

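/*
 * Driver-wide initialization, run once when the peripheral driver is
 * registered: hook the "new device found" async event and, when ordered
 * tags are enabled, a shutdown handler that flushes the write caches.
 */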
static void
adainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ada: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (ada_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
			printf("adainit: shutdown event registration failed!\n");
	}
}

static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);
	bioq_flush(&softc->trim_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}

static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

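/*
 * Async event handler.  Registered globally for AC_FOUND_DEVICE so that a
 * new "ada" peripheral is allocated for every ATA-protocol device the
 * transport reports; other events are passed to the generic periph handler.
 */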
static void
adaasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_ATA)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(adaregister, adaoninvalidate,
					  adacleanup, adastart,
					  "ada", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, adaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("adaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return;

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	cam_periph_release(periph);
}

static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
	struct ada_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char announce_buf[80];
	struct disk_params *dp;
	caddr_t match;
	u_int maxio;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("adaregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("adaregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("adaregister: Unable to probe new device. "
		    "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bioq_init(&softc->bio_queue);
	bioq_init(&softc->trim_queue);

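	/*
	 * Pull the feature set out of the ATA IDENTIFY data: DMA, 48-bit
	 * LBA, cache flush, NCQ (masked off again below if the SIM does not
	 * do tagged queueing), DSM TRIM (bounded by the drive's reported
	 * maximum DSM block count) and CFA erase.
	 */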
	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
		softc->flags |= ADA_FLAG_CAN_DMA;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		softc->flags |= ADA_FLAG_CAN_48BIT;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
	    cgd->inq_flags & SID_CmdQue)
		softc->flags |= ADA_FLAG_CAN_NCQ;
	if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
		softc->flags |= ADA_FLAG_CAN_TRIM;
		softc->trim_max_ranges = TRIM_MAX_RANGES;
		if (cgd->ident_data.max_dsm_blocks != 0) {
			softc->trim_max_ranges =
			    min(cgd->ident_data.max_dsm_blocks * 64,
				softc->trim_max_ranges);
		}
	}
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
		softc->flags |= ADA_FLAG_CAN_CFA;
	softc->state = ADA_STATE_NORMAL;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
			       (caddr_t)ada_quirk_table,
			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
			       sizeof(*ada_quirk_table), ata_identify_match);
	if (match != NULL)
		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
	else
		softc->quirks = ADA_Q_NONE;

	/* Check if the SIM does not want queued commands */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status != CAM_REQ_CMP ||
	    (cpi.hba_inquiry & PI_TAG_ABLE) == 0)
		softc->flags &= ~ADA_FLAG_CAN_NCQ;

	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);

	/*
	 * Register this media as a disk
	 */
	mtx_unlock(periph->sim->mtx);
	adagetparams(periph, cgd);
	softc->disk = disk_alloc();
	softc->disk->d_open = adaopen;
	softc->disk->d_close = adaclose;
	softc->disk->d_strategy = adastrategy;
	softc->disk->d_dump = adadump;
	softc->disk->d_name = "ada";
	softc->disk->d_drv1 = periph;
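	/*
	 * The maximum transfer size is the smallest of the SIM's reported
	 * maxio (DFLTPHYS if it reports none, never more than MAXPHYS) and
	 * the 65536- or 256-sector ceiling of 48-bit or 28-bit commands.
	 */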
	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
	if (maxio == 0)
		maxio = DFLTPHYS;	/* traditional default */
	else if (maxio > MAXPHYS)
		maxio = MAXPHYS;	/* for safety */
	if (softc->flags & ADA_FLAG_CAN_48BIT)
		maxio = min(maxio, 65536 * softc->params.secsize);
	else					/* 28bit ATA command limit */
		maxio = min(maxio, 256 * softc->params.secsize);
	softc->disk->d_maxsize = maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = 0;
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
		softc->disk->d_flags |= DISKFLAG_CANDELETE;
	strlcpy(softc->disk->d_ident, cgd->serial_num,
	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = (off_t)softc->params.sectors *
	    softc->params.secsize;
	if (ata_physical_sector_size(&cgd->ident_data) !=
	    softc->params.secsize) {
		softc->disk->d_stripesize =
		    ata_physical_sector_size(&cgd->ident_data);
		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
		    ata_logical_sector_offset(&cgd->ident_data)) %
		    softc->disk->d_stripesize;
	}
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;

	disk_create(softc->disk, DISK_VERSION);
	mtx_lock(periph->sim->mtx);

	dp = &softc->params;
	snprintf(announce_buf, sizeof(announce_buf),
		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
		(uintmax_t)(((uintmax_t)dp->secsize *
		dp->sectors) / (1024*1024)),
		(uintmax_t)dp->sectors,
		dp->secsize, dp->heads,
		dp->secs_per_track, dp->cylinders);
	xpt_announce_periph(periph, announce_buf);
	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_register_async(AC_LOST_DEVICE,
			   adaasync, periph, periph->path);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);

	return(CAM_REQ_CMP);
}

static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct ccb_ataio *ataio = &start_ccb->ataio;

	switch (softc->state) {
	case ADA_STATE_NORMAL:
	{
		struct bio *bp;
		u_int8_t tag_code;

		/* Execute immediate CCB if waiting. */
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			wakeup(&periph->ccb_list);
			/* Have more work to do, so ensure we stay scheduled */
			adaschedule(periph);
			break;
		}
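		/*
		 * BIO_DELETEs are coalesced: bios taken from the TRIM queue
		 * are packed as 8-byte LBA/count ranges into one DATA SET
		 * MANAGEMENT payload until the next bio would not fit, and
		 * every bio covered by the command is remembered in
		 * trim_req.bps so it can be completed in adadone().
		 */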
		/* Run TRIM if not running yet. */
		if (!softc->trim_running &&
		    (bp = bioq_first(&softc->trim_queue)) != 0) {
			struct trim_request *req = &softc->trim_req;
			struct bio *bp1;
			int bps = 0, ranges = 0;

			softc->trim_running = 1;
			bzero(req, sizeof(*req));
			bp1 = bp;
			do {
				uint64_t lba = bp1->bio_pblkno;
				int count = bp1->bio_bcount /
				    softc->params.secsize;

				bioq_remove(&softc->trim_queue, bp1);
				while (count > 0) {
					int c = min(count, 0xffff);
					int off = ranges * 8;

					req->data[off + 0] = lba & 0xff;
					req->data[off + 1] = (lba >> 8) & 0xff;
					req->data[off + 2] = (lba >> 16) & 0xff;
					req->data[off + 3] = (lba >> 24) & 0xff;
					req->data[off + 4] = (lba >> 32) & 0xff;
					req->data[off + 5] = (lba >> 40) & 0xff;
					req->data[off + 6] = c & 0xff;
					req->data[off + 7] = (c >> 8) & 0xff;
					lba += c;
					count -= c;
					ranges++;
				}
				req->bps[bps++] = bp1;
				bp1 = bioq_first(&softc->trim_queue);
				if (bp1 == NULL ||
				    bp1->bio_bcount / softc->params.secsize >
				    (softc->trim_max_ranges - ranges) * 0xffff)
					break;
			} while (1);
			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    CAM_DIR_OUT,
			    0,
			    req->data,
			    ((ranges + 63) / 64) * 512,
			    ada_default_timeout * 1000);
			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
			goto out;
		}
		/* Run regular command. */
		bp = bioq_first(&softc->bio_queue);
		if (bp == NULL) {
			xpt_release_ccb(start_ccb);
			break;
		}
		bioq_remove(&softc->bio_queue, bp);

		if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
			softc->flags &= ~ADA_FLAG_NEED_OTAG;
			softc->ordered_tag_count++;
			tag_code = 0;
		} else {
			tag_code = 1;
		}
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		{
			uint64_t lba = bp->bio_pblkno;
			uint16_t count = bp->bio_bcount / softc->params.secsize;

			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    bp->bio_cmd == BIO_READ ?
			        CAM_DIR_IN : CAM_DIR_OUT,
			    tag_code,
			    bp->bio_data,
			    bp->bio_bcount,
			    ada_default_timeout*1000);

			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
				if (bp->bio_cmd == BIO_READ) {
					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
					    lba, count);
				} else {
					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
					    lba, count);
				}
			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
			    (lba + count >= ATA_MAX_28BIT_LBA ||
			    count > 256)) {
				if (softc->flags & ADA_FLAG_CAN_DMA) {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_DMA48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_MUL48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
						    0, lba, count);
					}
				}
			} else {
				if (count == 256)
					count = 0;
				if (softc->flags & ADA_FLAG_CAN_DMA) {
					if (bp->bio_cmd == BIO_READ) {
						ata_28bit_cmd(ataio, ATA_READ_DMA,
						    0, lba, count);
					} else {
						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_28bit_cmd(ataio, ATA_READ_MUL,
						    0, lba, count);
					} else {
						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
						    0, lba, count);
					}
				}
			}
			break;
		}
		case BIO_DELETE:
		{
			uint64_t lba = bp->bio_pblkno;
			uint16_t count = bp->bio_bcount / softc->params.secsize;

			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    CAM_DIR_NONE,
			    0,
			    NULL,
			    0,
			    ada_default_timeout*1000);

			if (count >= 256)
				count = 0;
			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
			break;
		}
		case BIO_FLUSH:
			cam_fill_ataio(ataio,
			    1,
			    adadone,
			    CAM_DIR_NONE,
			    0,
			    NULL,
			    0,
			    ada_default_timeout*1000);

			if (softc->flags & ADA_FLAG_CAN_48BIT)
				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
			else
				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
			break;
		}
		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
out:
		start_ccb->ccb_h.ccb_bp = bp;
		softc->outstanding_cmds++;
		xpt_action(start_ccb);

		/* May have more work to do, so ensure we stay scheduled */
		adaschedule(periph);
		break;
	}
	}
}

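/*
 * Command completion.  Errors are run through adaerror() for a possible
 * retry; a TRIM completion fans the resulting status out to every bio that
 * was folded into the request before the next TRIM is allowed to start.
 */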
static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ada_softc *softc;
	struct ccb_ataio *ataio;

	softc = (struct ada_softc *)periph->softc;
	ataio = &done_ccb->ataio;
	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
	case ADA_CCB_BUFFER_IO:
	case ADA_CCB_TRIM:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;

			error = adaerror(done_ccb, 0, 0);
			if (error == ERESTART) {
				/* A retry was scheduled, so just return. */
				return;
			}
			if (error != 0) {
				if (error == ENXIO) {
					/*
					 * Catastrophic error. Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= ADA_FLAG_PACK_INVALID;
				}
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = ataio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
				    /*relsim_flags*/0,
				    /*reduction*/0,
				    /*timeout*/0,
				    /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = ataio->resid;
			if (ataio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= ADA_FLAG_WENT_IDLE;
		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
		    ADA_CCB_TRIM) {
			struct trim_request *req =
			    (struct trim_request *)ataio->data_ptr;
			int i;

			for (i = 1; i < softc->trim_max_ranges &&
			    req->bps[i]; i++) {
				struct bio *bp1 = req->bps[i];

				bp1->bio_resid = bp->bio_resid;
				bp1->bio_error = bp->bio_error;
				if (bp->bio_flags & BIO_ERROR)
					bp1->bio_flags |= BIO_ERROR;
				biodone(bp1);
			}
			softc->trim_running = 0;
			biodone(bp);
			adaschedule(periph);
		} else
			biodone(bp);
		break;
	}
	case ADA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case ADA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

static int
adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct ada_softc *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct ada_softc *)periph->softc;

	return(cam_periph_error(ccb, cam_flags, sense_flags,
	    &softc->saved_ccb));
}

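/*
 * Derive the disk geometry from the IDENTIFY data: prefer the current CHS
 * translation when it is valid, then the 28-bit LBA capacity when the
 * CHS-derived total is capped or smaller, and finally the 48-bit capacity
 * when the drive supports 48-bit addressing and reports more than
 * ATA_MAX_28BIT_LBA sectors.
 */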
static void
adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct disk_params *dp = &softc->params;
	u_int64_t lbasize48;
	u_int32_t lbasize;

	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
	    cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
		dp->heads = cgd->ident_data.current_heads;
		dp->secs_per_track = cgd->ident_data.current_sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
	} else {
		dp->heads = cgd->ident_data.heads;
		dp->secs_per_track = cgd->ident_data.sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
	}
	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);

	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
		dp->sectors = lbasize;

	/* use the 48bit LBA size if valid */
	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	    lbasize48 > ATA_MAX_28BIT_LBA)
		dp->sectors = lbasize48;
}

static void
adasendorderedtag(void *arg)
{
	struct ada_softc *softc = arg;

	if (ada_send_ordered) {
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= ADA_FLAG_NEED_OTAG;
		}
		if (softc->outstanding_cmds > 0)
			softc->flags &= ~ADA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);
}

/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
adashutdown(void *arg, int howto)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we panicked with the lock held, do not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it.
		 */
		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    1,
		    adadone,
		    CAM_DIR_NONE,
		    0,
		    NULL,
		    0,
		    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
			    /*relsim_flags*/0,
			    /*reduction*/0,
			    /*timeout*/0,
			    /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}

#endif /* _KERNEL */