1 /*- 2 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 32 #ifdef _KERNEL 33 #include <sys/systm.h> 34 #include <sys/kernel.h> 35 #include <sys/bio.h> 36 #include <sys/sysctl.h> 37 #include <sys/taskqueue.h> 38 #include <sys/lock.h> 39 #include <sys/mutex.h> 40 #include <sys/conf.h> 41 #include <sys/devicestat.h> 42 #include <sys/eventhandler.h> 43 #include <sys/malloc.h> 44 #include <sys/cons.h> 45 #include <geom/geom_disk.h> 46 #endif /* _KERNEL */ 47 48 #ifndef _KERNEL 49 #include <stdio.h> 50 #include <string.h> 51 #endif /* _KERNEL */ 52 53 #include <cam/cam.h> 54 #include <cam/cam_ccb.h> 55 #include <cam/cam_periph.h> 56 #include <cam/cam_xpt_periph.h> 57 #include <cam/cam_sim.h> 58 59 #include <cam/ata/ata_all.h> 60 61 #include <machine/md_var.h> /* geometry translation */ 62 63 #ifdef _KERNEL 64 65 #define ATA_MAX_28BIT_LBA 268435455UL 66 67 typedef enum { 68 ADA_STATE_NORMAL 69 } ada_state; 70 71 typedef enum { 72 ADA_FLAG_PACK_INVALID = 0x001, 73 ADA_FLAG_CAN_48BIT = 0x002, 74 ADA_FLAG_CAN_FLUSHCACHE = 0x004, 75 ADA_FLAG_CAN_NCQ = 0x008, 76 ADA_FLAG_CAN_DMA = 0x010, 77 ADA_FLAG_NEED_OTAG = 0x020, 78 ADA_FLAG_WENT_IDLE = 0x040, 79 ADA_FLAG_CAN_TRIM = 0x080, 80 ADA_FLAG_OPEN = 0x100, 81 ADA_FLAG_SCTX_INIT = 0x200, 82 ADA_FLAG_CAN_CFA = 0x400 83 } ada_flags; 84 85 typedef enum { 86 ADA_Q_NONE = 0x00 87 } ada_quirks; 88 89 typedef enum { 90 ADA_CCB_BUFFER_IO = 0x03, 91 ADA_CCB_WAITING = 0x04, 92 ADA_CCB_DUMP = 0x05, 93 ADA_CCB_TRIM = 0x06, 94 ADA_CCB_TYPE_MASK = 0x0F, 95 } ada_ccb_state; 96 97 /* Offsets into our private area for storing information */ 98 #define ccb_state ppriv_field0 99 #define ccb_bp ppriv_ptr1 100 101 struct disk_params { 102 u_int8_t heads; 103 u_int8_t secs_per_track; 104 u_int32_t cylinders; 105 u_int32_t secsize; /* Number of bytes/logical sector */ 106 u_int64_t sectors; /* Total number sectors */ 107 }; 108 109 #define TRIM_MAX_BLOCKS 4 110 #define TRIM_MAX_RANGES TRIM_MAX_BLOCKS * 64 111 struct 
trim_request { 112 uint8_t data[TRIM_MAX_RANGES * 8]; 113 struct bio *bps[TRIM_MAX_RANGES]; 114 }; 115 116 struct ada_softc { 117 struct bio_queue_head bio_queue; 118 struct bio_queue_head trim_queue; 119 ada_state state; 120 ada_flags flags; 121 ada_quirks quirks; 122 int ordered_tag_count; 123 int outstanding_cmds; 124 int trim_max_ranges; 125 int trim_running; 126 struct disk_params params; 127 struct disk *disk; 128 struct task sysctl_task; 129 struct sysctl_ctx_list sysctl_ctx; 130 struct sysctl_oid *sysctl_tree; 131 struct callout sendordered_c; 132 struct trim_request trim_req; 133 }; 134 135 struct ada_quirk_entry { 136 struct scsi_inquiry_pattern inq_pat; 137 ada_quirks quirks; 138 }; 139 140 static struct ada_quirk_entry ada_quirk_table[] = 141 { 142 { 143 /* Default */ 144 { 145 T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED, 146 /*vendor*/"*", /*product*/"*", /*revision*/"*" 147 }, 148 /*quirks*/0 149 }, 150 }; 151 152 static disk_strategy_t adastrategy; 153 static dumper_t adadump; 154 static periph_init_t adainit; 155 static void adaasync(void *callback_arg, u_int32_t code, 156 struct cam_path *path, void *arg); 157 static void adasysctlinit(void *context, int pending); 158 static periph_ctor_t adaregister; 159 static periph_dtor_t adacleanup; 160 static periph_start_t adastart; 161 static periph_oninv_t adaoninvalidate; 162 static void adadone(struct cam_periph *periph, 163 union ccb *done_ccb); 164 static int adaerror(union ccb *ccb, u_int32_t cam_flags, 165 u_int32_t sense_flags); 166 static void adagetparams(struct cam_periph *periph, 167 struct ccb_getdev *cgd); 168 static timeout_t adasendorderedtag; 169 static void adashutdown(void *arg, int howto); 170 171 #ifndef ADA_DEFAULT_TIMEOUT 172 #define ADA_DEFAULT_TIMEOUT 30 /* Timeout in seconds */ 173 #endif 174 175 #ifndef ADA_DEFAULT_RETRY 176 #define ADA_DEFAULT_RETRY 4 177 #endif 178 179 #ifndef ADA_DEFAULT_SEND_ORDERED 180 #define ADA_DEFAULT_SEND_ORDERED 1 181 #endif 182 183 /* 184 * Most 
 platforms map firmware geometry to actual, but some don't.  If
 * not overridden, default to nothing.
 */
#ifndef ata_disk_firmware_geom_adjust
#define	ata_disk_firmware_geom_adjust(disk)
#endif

static int ada_retry_count = ADA_DEFAULT_RETRY;
static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;

SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
           &ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
           &ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);

/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef ADA_ORDEREDTAG_INTERVAL
#define ADA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver adadriver =
{
	adainit, "ada",
	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(ada, adadriver);

MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");

/*
 * GEOM disk open entry point.
 *
 * Acquires a periph reference that is held for the lifetime of the open
 * (the matching release happens in adaclose()), holds the periph while
 * updating state, and marks the unit open.  Returns ENXIO if the periph
 * has gone away, or the error from cam_periph_hold() (which can be
 * interrupted by a signal because of PCATCH).
 */
static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int unit;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		/* Drop both the lock and the reference taken above. */
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	unit = periph->unit_number;
	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/* Invalidate our pack information. */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}

/*
 * GEOM disk close entry point.
 *
 * Issues a synchronous FLUSH CACHE (48- or 28-bit depending on device
 * capability) before marking the unit closed, then drops the reference
 * taken in adaopen().  A failed cache sync is logged but does not fail
 * the close.
 */
static int
adaclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	ada_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	/* We only sync the cache if the drive is capable of it. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		cam_fill_ataio(&ccb->ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
		/* Runs the CCB synchronously; completion handled inline. */
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
		    /*sense_flags*/0, softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		/* Unfreeze the device queue if the failed CCB froze it. */
		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		xpt_release_ccb(ccb);
	}

	softc->flags &= ~ADA_FLAG_OPEN;
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	/* Balances the cam_periph_acquire() done in adaopen(). */
	cam_periph_release(periph);
	return (0);
}

/*
 * (Re)schedule the periph with the transport layer if either queue has
 * work.  The TRIM queue only counts when no TRIM is already in flight,
 * since only one coalesced TRIM request is run at a time.
 * Caller must hold the periph lock.
 */
static void
adaschedule(struct cam_periph *periph)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;

	if (bioq_first(&softc->bio_queue) ||
	    (!softc->trim_running && bioq_first(&softc->trim_queue))) {
		/* Have more work to do, so ensure we stay scheduled */
		xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	}
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct ada_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}

	/*
	 * Place it in the queue of disk activities for this disk.
	 * Deletes go to the TRIM queue only when the device supports
	 * DSM TRIM; otherwise they fall through to the regular queue
	 * (handled as CFA ERASE in adastart()).
	 */
	if (bp->bio_cmd == BIO_DELETE &&
	    (softc->flags & ADA_FLAG_CAN_TRIM))
		bioq_disksort(&softc->trim_queue, bp);
	else
		bioq_disksort(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	adaschedule(periph);
	cam_periph_unlock(periph);

	return;
}

/*
 * Kernel crash-dump entry point.  Uses a polled (non-interrupt) CCB on
 * the stack to write `length` bytes at `offset`, choosing a 48-bit write
 * when the LBA range or count requires it.  A zero-length call signals
 * the end of the dump and triggers a final cache flush if supported.
 */
static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    ada_softc *softc;
	u_int	    secsize;
	union	    ccb ccb;
	struct	    disk *dp;
	uint64_t    lba;
	uint16_t    count;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct ada_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;
	lba = offset / secsize;
	count = length / secsize;

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    0,
		    adadone,
		    CAM_DIR_OUT,
		    0,
		    (u_int8_t *) virtual,
		    length,
		    ada_default_timeout*1000);
		/* 28-bit commands top out at LBA 2^28-1 and 256 sectors. */
		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
		    (lba + count >= ATA_MAX_28BIT_LBA ||
		    count >= 256)) {
			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
			    0, lba, count);
		} else {
			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
			    0, lba, count);
		}
		xpt_polled_action(&ccb);

		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			cam_periph_unlock(periph);
			return(EIO);
		}
		cam_periph_unlock(periph);
		return(0);
	}

	/* length == 0: end of dump — flush the write cache if we can. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    1,
		    adadone,
		    CAM_DIR_NONE,
		    0,
		    NULL,
		    0,
		    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
	cam_periph_unlock(periph);
	return (0);
}

/*
 * Driver-wide initialization, run once at periph driver load.
 */
static void
adainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ada: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (ada_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown,
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
		    printf("adainit: shutdown event registration failed!\n");
	}
}

/*
 * Periph invalidation callback: the device is going away.  Fail all
 * queued I/O with ENXIO and tell GEOM the disk is gone.  Commands
 * already issued to the controller are not aborted here (see XXX).
 */
static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);
	bioq_flush(&softc->trim_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}

/*
 * Periph destructor: final teardown after the last reference is gone.
 * Must drop the periph (SIM) lock around the sleepable disk/sysctl
 * destruction calls, then retake it for the caller.
 */
static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

/*
 * Async event dispatcher.  AC_FOUND_DEVICE on an ATA-protocol device
 * triggers creation of a new ada periph instance; everything else is
 * forwarded to the generic periph async handler.
 */
static void
adaasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;

		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_ATA)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(adaregister, adaoninvalidate,
					  adacleanup, adastart,
					  "ada", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, adaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("adaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

/*
 * Taskqueue handler that creates the per-unit sysctl tree
 * (kern.cam.ada.N).  Takes a periph reference for the duration.
 * NOTE(review): sysctl_task is TASK_INIT'd in adaregister() but no
 * taskqueue_enqueue() call is visible in this file — verify the task
 * is actually scheduled somewhere, otherwise this never runs.
 */
static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return;

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	cam_periph_release(periph);
}

/*
 * Periph constructor: probe the IDENTIFY data for capabilities, set up
 * the softc, and register the unit with GEOM.  The SIM lock is dropped
 * around disk_create() because GEOM attachment can sleep.
 */
static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
	struct ada_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char   announce_buf[80];
	struct disk_params *dp;
	caddr_t match;
	u_int maxio;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("adaregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("adaregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("adaregister: Unable to probe new device. "
		       "Unable to allocate softc\n");
		return(CAM_REQ_CMP_ERR);
	}

	bioq_init(&softc->bio_queue);
	bioq_init(&softc->trim_queue);

	/* Derive capability flags from the device's IDENTIFY data. */
	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
		softc->flags |= ADA_FLAG_CAN_DMA;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		softc->flags |= ADA_FLAG_CAN_48BIT;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
	    cgd->inq_flags & SID_CmdQue)
		softc->flags |= ADA_FLAG_CAN_NCQ;
	if (cgd->ident_data.support_dsm & ATA_SUPPORT_DSM_TRIM) {
		softc->flags |= ADA_FLAG_CAN_TRIM;
		softc->trim_max_ranges = TRIM_MAX_RANGES;
		/* Respect the device's advertised DSM block limit. */
		if (cgd->ident_data.max_dsm_blocks != 0) {
			softc->trim_max_ranges =
			    min(cgd->ident_data.max_dsm_blocks * 64,
				softc->trim_max_ranges);
		}
	}
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_CFA)
		softc->flags |= ADA_FLAG_CAN_CFA;
	softc->state = ADA_STATE_NORMAL;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
			       (caddr_t)ada_quirk_table,
			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
			       sizeof(*ada_quirk_table), ata_identify_match);
	if (match != NULL)
		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
	else
		softc->quirks = ADA_Q_NONE;

	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NONE);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);

	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);

	/*
	 * Register this media as a disk
	 */
	mtx_unlock(periph->sim->mtx);
	adagetparams(periph, cgd);
	softc->disk = disk_alloc();
	softc->disk->d_open = adaopen;
	softc->disk->d_close = adaclose;
	softc->disk->d_strategy = adastrategy;
	softc->disk->d_dump = adadump;
	softc->disk->d_name = "ada";
	softc->disk->d_drv1 = periph;
	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
	if (maxio == 0)
		maxio = DFLTPHYS;	/* traditional default */
	else if (maxio > MAXPHYS)
		maxio = MAXPHYS;	/* for safety */
	if (softc->flags & ADA_FLAG_CAN_48BIT)
		maxio = min(maxio, 65536 * softc->params.secsize);
	else					/* 28bit ATA command limit */
		maxio = min(maxio, 256 * softc->params.secsize);
	softc->disk->d_maxsize = maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = 0;
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	/* CFA ERASE only exists as a 28-bit command. */
	if ((softc->flags & ADA_FLAG_CAN_TRIM) ||
	    ((softc->flags & ADA_FLAG_CAN_CFA) &&
	    !(softc->flags & ADA_FLAG_CAN_48BIT)))
		softc->disk->d_flags |= DISKFLAG_CANDELETE;
	strlcpy(softc->disk->d_ident, cgd->serial_num,
	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = (off_t)softc->params.sectors *
	    softc->params.secsize;
	/* Expose physical sector size / alignment for 4K-sector drives. */
	if (ata_physical_sector_size(&cgd->ident_data) !=
	    softc->params.secsize) {
		softc->disk->d_stripesize =
		    ata_physical_sector_size(&cgd->ident_data);
		softc->disk->d_stripeoffset = (softc->disk->d_stripesize -
		    ata_logical_sector_offset(&cgd->ident_data)) %
		    softc->disk->d_stripesize;
	}
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;
	ata_disk_firmware_geom_adjust(softc->disk);

	disk_create(softc->disk, DISK_VERSION);
	mtx_lock(periph->sim->mtx);

	dp = &softc->params;
	snprintf(announce_buf, sizeof(announce_buf),
		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
		(uintmax_t)(((uintmax_t)dp->secsize *
		dp->sectors) / (1024*1024)),
		(uintmax_t)dp->sectors,
		dp->secsize, dp->heads,
		dp->secs_per_track, dp->cylinders);
	xpt_announce_periph(periph, announce_buf);
	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_register_async(AC_LOST_DEVICE,
			   adaasync, periph, periph->path);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);

	return(CAM_REQ_CMP);
}

/*
 * Periph "start" callback: the transport has a CCB for us.  Dequeues
 * the next piece of work — an immediate (waiting) CCB, a coalesced
 * TRIM, or a regular bio — fills in the ATA command, and dispatches it.
 * Called with the periph lock held.
 */
static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct ccb_ataio *ataio = &start_ccb->ataio;

	switch (softc->state) {
	case ADA_STATE_NORMAL:
	{
		struct bio *bp;
		u_int8_t tag_code;

		/* Execute immediate CCB if waiting.
 */
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			wakeup(&periph->ccb_list);
			/* Have more work to do, so ensure we stay scheduled */
			adaschedule(periph);
			break;
		}
		/* Run TRIM if not running yet. */
		if (!softc->trim_running &&
		    (bp = bioq_first(&softc->trim_queue)) != 0) {
			struct trim_request *req = &softc->trim_req;
			struct bio *bp1;
			int bps = 0, ranges = 0;

			softc->trim_running = 1;
			bzero(req, sizeof(*req));
			bp1 = bp;
			/*
			 * Coalesce queued BIO_DELETEs into one DSM TRIM
			 * payload: each range entry is 8 bytes — a 48-bit
			 * LBA (little-endian) plus a 16-bit sector count
			 * (max 0xffff per entry).
			 */
			do {
				uint64_t lba = bp1->bio_pblkno;
				int count = bp1->bio_bcount /
				    softc->params.secsize;

				bioq_remove(&softc->trim_queue, bp1);
				while (count > 0) {
					int c = min(count, 0xffff);
					int off = ranges * 8;

					req->data[off + 0] = lba & 0xff;
					req->data[off + 1] = (lba >> 8) & 0xff;
					req->data[off + 2] = (lba >> 16) & 0xff;
					req->data[off + 3] = (lba >> 24) & 0xff;
					req->data[off + 4] = (lba >> 32) & 0xff;
					req->data[off + 5] = (lba >> 40) & 0xff;
					req->data[off + 6] = c & 0xff;
					req->data[off + 7] = (c >> 8) & 0xff;
					lba += c;
					count -= c;
					ranges++;
				}
				req->bps[bps++] = bp1;
				bp1 = bioq_first(&softc->trim_queue);
				/* Stop if the next bio can't fit. */
				if (bp1 == NULL ||
				    bp1->bio_bcount / softc->params.secsize >
				    (softc->trim_max_ranges - ranges) * 0xffff)
					break;
			} while (1);
			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    CAM_DIR_OUT,
			    0,
			    req->data,
			    ((ranges + 63) / 64) * 512,
			    ada_default_timeout * 1000);
			/* Sector count is in 512-byte payload blocks. */
			ata_48bit_cmd(ataio, ATA_DATA_SET_MANAGEMENT,
			    ATA_DSM_TRIM, 0, (ranges + 63) / 64);
			start_ccb->ccb_h.ccb_state = ADA_CCB_TRIM;
			goto out;
		}
		/* Run regular command. */
		bp = bioq_first(&softc->bio_queue);
		if (bp == NULL) {
			xpt_release_ccb(start_ccb);
			break;
		}
		bioq_remove(&softc->bio_queue, bp);

		/* tag_code 0 = ordered tag, 1 = simple tag. */
		if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
			softc->flags &= ~ADA_FLAG_NEED_OTAG;
			softc->ordered_tag_count++;
			tag_code = 0;
		} else {
			tag_code = 1;
		}
		switch (bp->bio_cmd) {
		case BIO_READ:
		case BIO_WRITE:
		{
			uint64_t lba = bp->bio_pblkno;
			uint16_t count = bp->bio_bcount / softc->params.secsize;

			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    bp->bio_cmd == BIO_READ ?
			        CAM_DIR_IN : CAM_DIR_OUT,
			    tag_code,
			    bp->bio_data,
			    bp->bio_bcount,
			    ada_default_timeout*1000);

			/*
			 * Prefer NCQ when available and the transfer is
			 * simple-tagged; otherwise fall back to 48-bit or
			 * 28-bit DMA/PIO-multiple depending on LBA/count
			 * and DMA capability.
			 */
			if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
				if (bp->bio_cmd == BIO_READ) {
					ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
					    lba, count);
				} else {
					ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
					    lba, count);
				}
			} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
			    (lba + count >= ATA_MAX_28BIT_LBA ||
			    count > 256)) {
				if (softc->flags & ADA_FLAG_CAN_DMA) {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_DMA48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_48bit_cmd(ataio, ATA_READ_MUL48,
						    0, lba, count);
					} else {
						ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
						    0, lba, count);
					}
				}
			} else {
				/* 28-bit commands encode 256 sectors as 0. */
				if (count == 256)
					count = 0;
				if (softc->flags & ADA_FLAG_CAN_DMA) {
					if (bp->bio_cmd == BIO_READ) {
						ata_28bit_cmd(ataio, ATA_READ_DMA,
						    0, lba, count);
					} else {
						ata_28bit_cmd(ataio, ATA_WRITE_DMA,
						    0, lba, count);
					}
				} else {
					if (bp->bio_cmd == BIO_READ) {
						ata_28bit_cmd(ataio, ATA_READ_MUL,
						    0, lba, count);
					} else {
						ata_28bit_cmd(ataio, ATA_WRITE_MUL,
						    0, lba, count);
					}
				}
			}
			break;
		}
		case BIO_DELETE:
		{
			/* Non-TRIM delete path: CFA ERASE (28-bit only). */
			uint64_t lba = bp->bio_pblkno;
			uint16_t count = bp->bio_bcount / softc->params.secsize;

			cam_fill_ataio(ataio,
			    ada_retry_count,
			    adadone,
			    CAM_DIR_NONE,
			    0,
			    NULL,
			    0,
			    ada_default_timeout*1000);

			if (count >= 256)
				count = 0;
			ata_28bit_cmd(ataio, ATA_CFA_ERASE, 0, lba, count);
			break;
		}
		case BIO_FLUSH:
			cam_fill_ataio(ataio,
			    1,
			    adadone,
			    CAM_DIR_NONE,
			    0,
			    NULL,
			    0,
			    ada_default_timeout*1000);

			if (softc->flags & ADA_FLAG_CAN_48BIT)
				ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
			else
				ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
			break;
		}
		start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
out:
		start_ccb->ccb_h.ccb_bp = bp;
		softc->outstanding_cmds++;
		xpt_action(start_ccb);

		/* May have more work to do, so ensure we stay scheduled */
		adaschedule(periph);
		break;
	}
	}
}

/*
 * Completion callback for all CCBs issued by this driver.  Propagates
 * status/residual into the originating bio(s), handles retries and
 * queue-freeze release, and finishes every bio folded into a TRIM.
 */
static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ada_softc *softc;
	struct ccb_ataio *ataio;

	softc = (struct ada_softc *)periph->softc;
	ataio = &done_ccb->ataio;
	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
	case ADA_CCB_BUFFER_IO:
	case ADA_CCB_TRIM:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;

			error = adaerror(done_ccb, 0, 0);
			if (error == ERESTART) {
				/* A retry was scheduled, so just return. */
				return;
			}
			if (error != 0) {
				if (error == ENXIO) {
					/*
					 * Catastrophic error. Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= ADA_FLAG_PACK_INVALID;
				}
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = ataio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = ataio->resid;
			if (ataio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= ADA_FLAG_WENT_IDLE;
		if ((ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) ==
		    ADA_CCB_TRIM) {
			struct trim_request *req =
			    (struct trim_request *)ataio->data_ptr;
			int i;

			/*
			 * bps[0] is `bp`, finished below; replicate its
			 * status to the other coalesced bios (i >= 1).
			 */
			for (i = 1; i < softc->trim_max_ranges &&
			    req->bps[i]; i++) {
				struct bio *bp1 = req->bps[i];

				bp1->bio_resid = bp->bio_resid;
				bp1->bio_error = bp->bio_error;
				if (bp->bio_flags & BIO_ERROR)
					bp1->bio_flags |= BIO_ERROR;
				biodone(bp1);
			}
			softc->trim_running = 0;
			biodone(bp);
			adaschedule(periph);
		} else
			biodone(bp);
		break;
	}
	case ADA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case ADA_CCB_DUMP:
		/* No-op.
  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

/*
 * Error recovery hook: defer to the generic CAM periph error handler,
 * which may schedule a retry (ERESTART) or map the status to an errno.
 */
static int
adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct ada_softc *softc;
	struct cam_periph *periph;

	/* Currently unused here; kept for future quirk-based recovery. */
	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct ada_softc *)periph->softc;

	return(cam_periph_error(ccb, cam_flags, sense_flags, NULL));
}

/*
 * Derive the disk geometry and capacity from IDENTIFY data, preferring
 * (in increasing order) current CHS values, the 28-bit LBA size, and
 * finally the 48-bit LBA size when valid.
 */
static void
adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct disk_params *dp = &softc->params;
	u_int64_t lbasize48;
	u_int32_t lbasize;

	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
	    cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
		dp->heads = cgd->ident_data.current_heads;
		dp->secs_per_track = cgd->ident_data.current_sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
	} else {
		dp->heads = cgd->ident_data.heads;
		dp->secs_per_track = cgd->ident_data.sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;
	}
	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);

	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
		dp->sectors = lbasize;

	/* use the 48bit LBA size if valid */
	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	    lbasize48 > ATA_MAX_28BIT_LBA)
		dp->sectors = lbasize48;
}

/*
 * Periodic callout (every ADA_DEFAULT_TIMEOUT/ADA_ORDEREDTAG_INTERVAL
 * seconds): if no ordered tag was sent during the last interval while
 * the device stayed busy, request one to prevent simple-tag starvation.
 * Runs with the SIM mutex held (callout_init_mtx in adaregister()).
 */
static void
adasendorderedtag(void *arg)
{
	struct ada_softc *softc = arg;

	if (ada_send_ordered) {
		if ((softc->ordered_tag_count == 0)
		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= ADA_FLAG_NEED_OTAG;
		}
		if (softc->outstanding_cmds > 0)
			softc->flags &= ~ADA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);
}

/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
adashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we paniced with lock held - not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		/* Polled flush: interrupts may already be disabled here. */
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}

#endif /* _KERNEL */