/*-
 * Copyright (c) 2009 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* CAM ATA direct-access (disk) peripheral driver, "ada". */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>

#ifdef _KERNEL
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bio.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/eventhandler.h>
#include <sys/malloc.h>
#include <sys/cons.h>
#include <geom/geom_disk.h>
#endif /* _KERNEL */

#ifndef _KERNEL
#include <stdio.h>
#include <string.h>
#endif /* _KERNEL */

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_periph.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_sim.h>

#include <cam/ata/ata_all.h>

#ifdef _KERNEL

/* Largest LBA addressable by a 28-bit ATA command (0x0FFFFFFF). */
#define ATA_MAX_28BIT_LBA	268435455UL

typedef enum {
	ADA_STATE_NORMAL		/* only state; I/O serviced directly */
} ada_state;

typedef enum {
	ADA_FLAG_PACK_INVALID	= 0x001, /* device lost/errored; fail I/O with ENXIO */
	ADA_FLAG_CAN_48BIT	= 0x002, /* IDENTIFY reported ATA_SUPPORT_ADDRESS48 */
	ADA_FLAG_CAN_FLUSHCACHE	= 0x004, /* IDENTIFY reported ATA_SUPPORT_FLUSHCACHE */
	ADA_FLAG_CAN_NCQ	= 0x008, /* device and SIM both support queued cmds */
	ADA_FLAG_CAN_DMA	= 0x010, /* IDENTIFY reported ATA_SUPPORT_DMA */
	ADA_FLAG_NEED_OTAG	= 0x020, /* next I/O should carry an ordered tag */
	ADA_FLAG_WENT_IDLE	= 0x040, /* outstanding_cmds dropped to zero */
	ADA_FLAG_OPEN		= 0x100, /* disk is open (checked at shutdown) */
	ADA_FLAG_SCTX_INIT	= 0x200	 /* sysctl context was initialized */
} ada_flags;

typedef enum {
	ADA_Q_NONE		= 0x00
} ada_quirks;

/* Stored in ccb_h.ccb_state so adadone() can tell CCB types apart. */
typedef enum {
	ADA_CCB_BUFFER_IO	= 0x03,
	ADA_CCB_WAITING		= 0x04,
	ADA_CCB_DUMP		= 0x05,
	ADA_CCB_TYPE_MASK	= 0x0F,
} ada_ccb_state;

/* Offsets into our private area for storing information */
#define ccb_state	ppriv_field0
#define ccb_bp		ppriv_ptr1

struct disk_params {
	u_int8_t  heads;
	u_int8_t  secs_per_track;
	u_int32_t cylinders;
	u_int32_t secsize;	/* Number of bytes/logical sector */
	u_int64_t sectors;	/* Total number sectors */
};

/* Per-device soft state; protected by the owning SIM's mutex. */
struct ada_softc {
	struct	 bio_queue_head bio_queue; /* pending I/O, disksorted */
	ada_state state;
	ada_flags flags;
	ada_quirks quirks;
	int	 ordered_tag_count;	/* ordered tags sent this interval */
	int	 outstanding_cmds;	/* CCBs dispatched, not yet completed */
	struct	 disk_params params;
	struct	 disk *disk;		/* disk(9) instance */
	union	 ccb saved_ccb;		/* scratch CCB for cam_periph_error() */
	struct task		sysctl_task;
	struct sysctl_ctx_list	sysctl_ctx;
	struct sysctl_oid	*sysctl_tree;
	struct callout		sendordered_c; /* periodic ordered-tag timer */
};

struct ada_quirk_entry {
	struct scsi_inquiry_pattern inq_pat;
	ada_quirks quirks;
};

static struct ada_quirk_entry ada_quirk_table[] =
{
	{
		/* Default */
		{
		  T_ANY, SIP_MEDIA_REMOVABLE|SIP_MEDIA_FIXED,
		  /*vendor*/"*", /*product*/"*", /*revision*/"*"
		},
		/*quirks*/0
	},
};

static	disk_strategy_t	adastrategy;
static	dumper_t	adadump;
static	periph_init_t	adainit;
static	void		adaasync(void *callback_arg, u_int32_t code,
				struct cam_path *path, void *arg);
static	void		adasysctlinit(void *context, int pending);
static	periph_ctor_t	adaregister;
static	periph_dtor_t	adacleanup;
static	periph_start_t	adastart;
static	periph_oninv_t	adaoninvalidate;
static	void		adadone(struct cam_periph *periph,
				union ccb *done_ccb);
static	int		adaerror(union ccb *ccb, u_int32_t cam_flags,
				u_int32_t sense_flags);
static	void		adagetparams(struct cam_periph *periph,
				struct ccb_getdev *cgd);
static	timeout_t	adasendorderedtag;
static	void		adashutdown(void *arg, int howto);

#ifndef ADA_DEFAULT_TIMEOUT
#define ADA_DEFAULT_TIMEOUT 30	/* Timeout in seconds */
#endif

#ifndef ADA_DEFAULT_RETRY
#define ADA_DEFAULT_RETRY	4
#endif

#ifndef ADA_DEFAULT_SEND_ORDERED
#define ADA_DEFAULT_SEND_ORDERED	1
#endif

/* Tunable/sysctl-adjustable driver defaults. */
static int ada_retry_count = ADA_DEFAULT_RETRY;
static int ada_default_timeout = ADA_DEFAULT_TIMEOUT;
static int ada_send_ordered = ADA_DEFAULT_SEND_ORDERED;

SYSCTL_NODE(_kern_cam, OID_AUTO, ada, CTLFLAG_RD, 0,
            "CAM Direct Access Disk driver");
SYSCTL_INT(_kern_cam_ada, OID_AUTO, retry_count, CTLFLAG_RW,
           &ada_retry_count, 0, "Normal I/O retry count");
TUNABLE_INT("kern.cam.ada.retry_count", &ada_retry_count);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, default_timeout, CTLFLAG_RW,
           &ada_default_timeout, 0, "Normal I/O timeout (in seconds)");
TUNABLE_INT("kern.cam.ada.default_timeout", &ada_default_timeout);
SYSCTL_INT(_kern_cam_ada, OID_AUTO, ada_send_ordered, CTLFLAG_RW,
           &ada_send_ordered, 0, "Send Ordered Tags");
TUNABLE_INT("kern.cam.ada.ada_send_ordered", &ada_send_ordered);

/*
 * ADA_ORDEREDTAG_INTERVAL determines how often, relative
 * to the default timeout, we check to see whether an ordered
 * tagged transaction is appropriate to prevent simple tag
 * starvation.  Since we'd like to ensure that there is at least
 * 1/2 of the timeout length left for a starved transaction to
 * complete after we've sent an ordered tag, we must poll at least
 * four times in every timeout period.  This takes care of the worst
 * case where a starved transaction starts during an interval that
 * meets the requirement "don't send an ordered tag" test so it takes
 * us two intervals to determine that a tag must be sent.
 */
#ifndef ADA_ORDEREDTAG_INTERVAL
#define ADA_ORDEREDTAG_INTERVAL 4
#endif

static struct periph_driver adadriver =
{
	adainit, "ada",
	TAILQ_HEAD_INITIALIZER(adadriver.units), /* generation */ 0
};

PERIPHDRIVER_DECLARE(ada, adadriver);

MALLOC_DEFINE(M_ATADA, "ata_da", "ata_da buffers");

/*
 * disk(9) open entry point: take a periph reference, mark the device
 * open, and clear any stale pack-invalid state.
 */
static int
adaopen(struct disk *dp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	int unit;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL) {
		return (ENXIO);
	}

	/* Hold a reference for the lifetime of the open. */
	if (cam_periph_acquire(periph) != CAM_REQ_CMP) {
		return(ENXIO);
	}

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO|PCATCH)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	unit = periph->unit_number;
	softc = (struct ada_softc *)periph->softc;
	softc->flags |= ADA_FLAG_OPEN;

	CAM_DEBUG(periph->path, CAM_DEBUG_TRACE,
	    ("adaopen: disk=%s%d (unit %d)\n", dp->d_name, dp->d_unit,
	     unit));

	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		/* Invalidate our pack information. */
		softc->flags &= ~ADA_FLAG_PACK_INVALID;
	}

	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	return (0);
}

/*
 * disk(9) close entry point: synchronously flush the write cache
 * (if the drive supports it) and drop the reference taken in adaopen().
 */
static int
adaclose(struct disk *dp)
{
	struct	cam_periph *periph;
	struct	ada_softc *softc;
	union ccb *ccb;
	int error;

	periph = (struct cam_periph *)dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);

	cam_periph_lock(periph);
	if ((error = cam_periph_hold(periph, PRIBIO)) != 0) {
		cam_periph_unlock(periph);
		cam_periph_release(periph);
		return (error);
	}

	softc = (struct ada_softc *)periph->softc;
	/* We only sync the cache if the drive is capable of it. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {

		ccb = cam_periph_getccb(periph, CAM_PRIORITY_NORMAL);
		cam_fill_ataio(&ccb->ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb->ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb->ataio, ATA_FLUSHCACHE, 0, 0, 0);
		/* Run the flush to completion; errors are only reported. */
		cam_periph_runccb(ccb, /*error_routine*/NULL, /*cam_flags*/0,
		    /*sense_flags*/0, softc->disk->d_devstat);

		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb->ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		xpt_release_ccb(ccb);
	}

	softc->flags &= ~ADA_FLAG_OPEN;
	cam_periph_unhold(periph);
	cam_periph_unlock(periph);
	cam_periph_release(periph);
	return (0);	
}

/*
 * Actually translate the requested transfer into one the physical driver
 * can understand.  The transfer is described by a buf and will include
 * only one physical transfer.
 */
static void
adastrategy(struct bio *bp)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	
	periph = (struct cam_periph *)bp->bio_disk->d_drv1;
	if (periph == NULL) {
		biofinish(bp, NULL, ENXIO);
		return;
	}
	softc = (struct ada_softc *)periph->softc;

	cam_periph_lock(periph);

	/*
	 * If the device has been made invalid, error out
	 */
	if ((softc->flags & ADA_FLAG_PACK_INVALID)) {
		cam_periph_unlock(periph);
		biofinish(bp, NULL, ENXIO);
		return;
	}
	
	/*
	 * Place it in the queue of disk activities for this disk
	 */
	bioq_disksort(&softc->bio_queue, bp);

	/*
	 * Schedule ourselves for performing the work.
	 */
	xpt_schedule(periph, CAM_PRIORITY_NORMAL);
	cam_periph_unlock(periph);

	return;
}

/*
 * dumper_t entry point used while the kernel is crashing: write
 * `length` bytes at `offset` using polled CCBs (no interrupts), then
 * flush the cache on the final zero-length call.
 */
static int
adadump(void *arg, void *virtual, vm_offset_t physical, off_t offset, size_t length)
{
	struct	    cam_periph *periph;
	struct	    ada_softc *softc;
	u_int	    secsize;
	union	    ccb ccb;
	struct	    disk *dp;
	uint64_t    lba;
	uint16_t    count;

	dp = arg;
	periph = dp->d_drv1;
	if (periph == NULL)
		return (ENXIO);
	softc = (struct ada_softc *)periph->softc;
	cam_periph_lock(periph);
	secsize = softc->params.secsize;
	lba = offset / secsize;
	count = length / secsize;
	
	if ((softc->flags & ADA_FLAG_PACK_INVALID) != 0) {
		cam_periph_unlock(periph);
		return (ENXIO);
	}

	if (length > 0) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
		    0,
		    adadone,
		    CAM_DIR_OUT,
		    0,
		    (u_int8_t *) virtual,
		    length,
		    ada_default_timeout*1000);
		/* Use a 48-bit command when the range exceeds 28-bit limits. */
		if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
		    (lba + count >= ATA_MAX_28BIT_LBA ||
		    count >= 256)) {
			ata_48bit_cmd(&ccb.ataio, ATA_WRITE_DMA48,
			    0, lba, count);
		} else {
			ata_28bit_cmd(&ccb.ataio, ATA_WRITE_DMA,
			    0, lba, count);
		}
		xpt_polled_action(&ccb);

		if ((ccb.ataio.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			printf("Aborting dump due to I/O error.\n");
			cam_periph_unlock(periph);
			return(EIO);
		}
		cam_periph_unlock(periph);
		return(0);
	}

	/* length == 0 signals end of dump: flush the write cache. */
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) {
		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
	}
	cam_periph_unlock(periph);
	return (0);
}

/*
 * Driver-wide initialization: register the async callback that creates
 * periph instances for new ATA devices, and hook shutdown for cache sync.
 */
static void
adainit(void)
{
	cam_status status;

	/*
	 * Install a global async callback.  This callback will
	 * receive async callbacks like "new device found".
	 */
	status = xpt_register_async(AC_FOUND_DEVICE, adaasync, NULL, NULL);

	if (status != CAM_REQ_CMP) {
		printf("ada: Failed to attach master async callback "
		       "due to status 0x%x!\n", status);
	} else if (ada_send_ordered) {

		/* Register our shutdown event handler */
		if ((EVENTHANDLER_REGISTER(shutdown_post_sync, adashutdown, 
					   NULL, SHUTDOWN_PRI_DEFAULT)) == NULL)
		    printf("adainit: shutdown event registration failed!\n");
	}
}

/*
 * Called when the periph is being invalidated: fail all queued I/O and
 * detach the disk(9) instance.
 */
static void
adaoninvalidate(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	/*
	 * De-register any async callbacks.
	 */
	xpt_register_async(0, adaasync, periph, periph->path);

	softc->flags |= ADA_FLAG_PACK_INVALID;

	/*
	 * Return all queued I/O with ENXIO.
	 * XXX Handle any transactions queued to the card
	 *     with XPT_ABORT_CCB.
	 */
	bioq_flush(&softc->bio_queue, NULL, ENXIO);

	disk_gone(softc->disk);
	xpt_print(periph->path, "lost device\n");
}

/*
 * Final periph teardown: free sysctl state, destroy the disk, drain the
 * ordered-tag callout, and free the softc.
 */
static void
adacleanup(struct cam_periph *periph)
{
	struct ada_softc *softc;

	softc = (struct ada_softc *)periph->softc;

	xpt_print(periph->path, "removing device entry\n");
	/* disk_destroy()/sysctl teardown may sleep; drop the periph lock. */
	cam_periph_unlock(periph);

	/*
	 * If we can't free the sysctl tree, oh well...
	 */
	if ((softc->flags & ADA_FLAG_SCTX_INIT) != 0
	    && sysctl_ctx_free(&softc->sysctl_ctx) != 0) {
		xpt_print(periph->path, "can't remove sysctl context\n");
	}

	disk_destroy(softc->disk);
	callout_drain(&softc->sendordered_c);
	free(softc, M_DEVBUF);
	cam_periph_lock(periph);
}

/*
 * Async event handler: on AC_FOUND_DEVICE for an ATA-protocol device,
 * allocate and register a new ada periph instance.
 */
static void
adaasync(void *callback_arg, u_int32_t code,
	struct cam_path *path, void *arg)
{
	struct cam_periph *periph;

	periph = (struct cam_periph *)callback_arg;
	switch (code) {
	case AC_FOUND_DEVICE:
	{
		struct ccb_getdev *cgd;
		cam_status status;
 
		cgd = (struct ccb_getdev *)arg;
		if (cgd == NULL)
			break;

		if (cgd->protocol != PROTO_ATA)
			break;

		/*
		 * Allocate a peripheral instance for
		 * this device and start the probe
		 * process.
		 */
		status = cam_periph_alloc(adaregister, adaoninvalidate,
					  adacleanup, adastart,
					  "ada", CAM_PERIPH_BIO,
					  cgd->ccb_h.path, adaasync,
					  AC_FOUND_DEVICE, cgd);

		if (status != CAM_REQ_CMP
		 && status != CAM_REQ_INPROG)
			printf("adaasync: Unable to attach to new device "
				"due to status 0x%x\n", status);
		break;
	}
	default:
		cam_periph_async(periph, code, path, arg);
		break;
	}
}

/*
 * Taskqueue handler: create the per-unit sysctl tree under
 * kern.cam.ada.<unit>.  Holds a periph reference for the duration.
 */
static void
adasysctlinit(void *context, int pending)
{
	struct cam_periph *periph;
	struct ada_softc *softc;
	char tmpstr[80], tmpstr2[80];

	periph = (struct cam_periph *)context;
	if (cam_periph_acquire(periph) != CAM_REQ_CMP)
		return;

	softc = (struct ada_softc *)periph->softc;
	snprintf(tmpstr, sizeof(tmpstr), "CAM ADA unit %d", periph->unit_number);
	snprintf(tmpstr2, sizeof(tmpstr2), "%d", periph->unit_number);

	sysctl_ctx_init(&softc->sysctl_ctx);
	softc->flags |= ADA_FLAG_SCTX_INIT;
	softc->sysctl_tree = SYSCTL_ADD_NODE(&softc->sysctl_ctx,
		SYSCTL_STATIC_CHILDREN(_kern_cam_ada), OID_AUTO, tmpstr2,
		CTLFLAG_RD, 0, tmpstr);
	if (softc->sysctl_tree == NULL) {
		printf("adasysctlinit: unable to allocate sysctl tree\n");
		cam_periph_release(periph);
		return;
	}

	cam_periph_release(periph);
}

/*
 * Periph constructor: allocate the softc, decode IDENTIFY capability
 * bits into flags, size the transfer limits, create the disk(9)
 * instance, and start the periodic ordered-tag callout.
 */
static cam_status
adaregister(struct cam_periph *periph, void *arg)
{
	struct ada_softc *softc;
	struct ccb_pathinq cpi;
	struct ccb_getdev *cgd;
	char   announce_buf[80];
	struct disk_params *dp;
	caddr_t match;
	u_int maxio;

	cgd = (struct ccb_getdev *)arg;
	if (periph == NULL) {
		printf("adaregister: periph was NULL!!\n");
		return(CAM_REQ_CMP_ERR);
	}

	if (cgd == NULL) {
		printf("adaregister: no getdev CCB, can't register device\n");
		return(CAM_REQ_CMP_ERR);
	}

	softc = (struct ada_softc *)malloc(sizeof(*softc), M_DEVBUF,
	    M_NOWAIT|M_ZERO);

	if (softc == NULL) {
		printf("adaregister: Unable to probe new device. "
		       "Unable to allocate softc\n");				
		return(CAM_REQ_CMP_ERR);
	}

	bioq_init(&softc->bio_queue);

	/* Translate IDENTIFY capability bits into driver flags. */
	if (cgd->ident_data.capabilities1 & ATA_SUPPORT_DMA)
		softc->flags |= ADA_FLAG_CAN_DMA;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		softc->flags |= ADA_FLAG_CAN_48BIT;
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		softc->flags |= ADA_FLAG_CAN_FLUSHCACHE;
	if (cgd->ident_data.satacapabilities & ATA_SUPPORT_NCQ &&
	    cgd->inq_flags & SID_CmdQue)
		softc->flags |= ADA_FLAG_CAN_NCQ;
	softc->state = ADA_STATE_NORMAL;

	periph->softc = softc;

	/*
	 * See if this device has any quirks.
	 */
	match = cam_quirkmatch((caddr_t)&cgd->ident_data,
			       (caddr_t)ada_quirk_table,
			       sizeof(ada_quirk_table)/sizeof(*ada_quirk_table),
			       sizeof(*ada_quirk_table), ata_identify_match);
	if (match != NULL)
		softc->quirks = ((struct ada_quirk_entry *)match)->quirks;
	else
		softc->quirks = ADA_Q_NONE;

	/* Check if the SIM does not want queued commands */
	bzero(&cpi, sizeof(cpi));
	xpt_setup_ccb(&cpi.ccb_h, periph->path, CAM_PRIORITY_NORMAL);
	cpi.ccb_h.func_code = XPT_PATH_INQ;
	xpt_action((union ccb *)&cpi);
	if (cpi.ccb_h.status != CAM_REQ_CMP ||
	    (cpi.hba_inquiry & PI_TAG_ABLE) == 0)
		softc->flags &= ~ADA_FLAG_CAN_NCQ;

	TASK_INIT(&softc->sysctl_task, 0, adasysctlinit, periph);

	/*
	 * Register this media as a disk
	 */
	mtx_unlock(periph->sim->mtx);
	adagetparams(periph, cgd);
	softc->disk = disk_alloc();
	softc->disk->d_open = adaopen;
	softc->disk->d_close = adaclose;
	softc->disk->d_strategy = adastrategy;
	softc->disk->d_dump = adadump;
	softc->disk->d_name = "ada";
	softc->disk->d_drv1 = periph;
	maxio = cpi.maxio;		/* Honor max I/O size of SIM */
	if (maxio == 0)
		maxio = DFLTPHYS;	/* traditional default */
	else if (maxio > MAXPHYS)
		maxio = MAXPHYS;	/* for safety */
	/* Clamp to the per-command sector-count limit of the command set. */
	if (cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48)
		maxio = min(maxio, 65536 * softc->params.secsize);
	else					/* 28bit ATA command limit */
		maxio = min(maxio, 256 * softc->params.secsize);
	softc->disk->d_maxsize = maxio;
	softc->disk->d_unit = periph->unit_number;
	softc->disk->d_flags = 0;
	if (softc->flags & ADA_FLAG_CAN_FLUSHCACHE)
		softc->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	strlcpy(softc->disk->d_ident, cgd->serial_num,
	    MIN(sizeof(softc->disk->d_ident), cgd->serial_num_len + 1));

	softc->disk->d_sectorsize = softc->params.secsize;
	softc->disk->d_mediasize = (off_t)softc->params.sectors *
	    softc->params.secsize;
	softc->disk->d_stripesize = ata_physical_sector_size(&cgd->ident_data);
	softc->disk->d_stripeoffset = softc->disk->d_stripesize -
	    ata_logical_sector_offset(&cgd->ident_data);
	/* XXX: these are not actually "firmware" values, so they may be wrong */
	softc->disk->d_fwsectors = softc->params.secs_per_track;
	softc->disk->d_fwheads = softc->params.heads;

	disk_create(softc->disk, DISK_VERSION);
	mtx_lock(periph->sim->mtx);

	dp = &softc->params;
	snprintf(announce_buf, sizeof(announce_buf),
		"%juMB (%ju %u byte sectors: %dH %dS/T %dC)",
		(uintmax_t)(((uintmax_t)dp->secsize *
		dp->sectors) / (1024*1024)),
		(uintmax_t)dp->sectors,
		dp->secsize, dp->heads,
		dp->secs_per_track, dp->cylinders);
	xpt_announce_periph(periph, announce_buf);
	/*
	 * Add async callbacks for bus reset and
	 * bus device reset calls.  I don't bother
	 * checking if this fails as, in most cases,
	 * the system will function just fine without
	 * them and the only alternative would be to
	 * not attach the device on failure.
	 */
	xpt_register_async(AC_LOST_DEVICE,
			   adaasync, periph, periph->path);

	/*
	 * Schedule a periodic event to occasionally send an
	 * ordered tag to a device.
	 */
	callout_init_mtx(&softc->sendordered_c, periph->sim->mtx, 0);
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);

	return(CAM_REQ_CMP);
}

/*
 * Periph start routine: convert the next queued bio into an ATA CCB,
 * selecting NCQ, 48-bit, or 28-bit commands (DMA or PIO multi-block)
 * based on device capability flags and the LBA/count range.
 */
static void
adastart(struct cam_periph *periph, union ccb *start_ccb)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct ccb_ataio *ataio = &start_ccb->ataio;

	switch (softc->state) {
	case ADA_STATE_NORMAL:
	{
		/* Pull a buffer from the queue and get going on it */		
		struct bio *bp;

		/*
		 * See if there is a buf with work for us to do..
		 */
		bp = bioq_first(&softc->bio_queue);
		if (periph->immediate_priority <= periph->pinfo.priority) {
			CAM_DEBUG_PRINT(CAM_DEBUG_SUBTRACE,
					("queuing for immediate ccb\n"));
			start_ccb->ccb_h.ccb_state = ADA_CCB_WAITING;
			SLIST_INSERT_HEAD(&periph->ccb_list, &start_ccb->ccb_h,
					  periph_links.sle);
			periph->immediate_priority = CAM_PRIORITY_NONE;
			wakeup(&periph->ccb_list);
		} else if (bp == NULL) {
			xpt_release_ccb(start_ccb);
		} else {
			u_int8_t tag_code;

			bioq_remove(&softc->bio_queue, bp);

			/* tag_code 0 = ordered tag, 1 = simple tag. */
			if ((softc->flags & ADA_FLAG_NEED_OTAG) != 0) {
				softc->flags &= ~ADA_FLAG_NEED_OTAG;
				softc->ordered_tag_count++;
				tag_code = 0;
			} else {
				tag_code = 1;
			}
			switch (bp->bio_cmd) {
			case BIO_READ:
			case BIO_WRITE:
			{
				uint64_t lba = bp->bio_pblkno;
				uint16_t count = bp->bio_bcount / softc->params.secsize;

				cam_fill_ataio(ataio,
				    ada_retry_count,
				    adadone,
				    bp->bio_cmd == BIO_READ ?
				        CAM_DIR_IN : CAM_DIR_OUT,
				    tag_code,
				    bp->bio_data,
				    bp->bio_bcount,
				    ada_default_timeout*1000);

				if ((softc->flags & ADA_FLAG_CAN_NCQ) && tag_code) {
					if (bp->bio_cmd == BIO_READ) {
						ata_ncq_cmd(ataio, ATA_READ_FPDMA_QUEUED,
						    lba, count);
					} else {
						ata_ncq_cmd(ataio, ATA_WRITE_FPDMA_QUEUED,
						    lba, count);
					}
				} else if ((softc->flags & ADA_FLAG_CAN_48BIT) &&
				    (lba + count >= ATA_MAX_28BIT_LBA ||
				    count > 256)) {
					if (softc->flags & ADA_FLAG_CAN_DMA) {
						if (bp->bio_cmd == BIO_READ) {
							ata_48bit_cmd(ataio, ATA_READ_DMA48,
							    0, lba, count);
						} else {
							ata_48bit_cmd(ataio, ATA_WRITE_DMA48,
							    0, lba, count);
						}
					} else {
						if (bp->bio_cmd == BIO_READ) {
							ata_48bit_cmd(ataio, ATA_READ_MUL48,
							    0, lba, count);
						} else {
							ata_48bit_cmd(ataio, ATA_WRITE_MUL48,
							    0, lba, count);
						}
					}
				} else {
					/* 28-bit commands encode 256 sectors as 0. */
					if (count == 256)
						count = 0;
					if (softc->flags & ADA_FLAG_CAN_DMA) {
						if (bp->bio_cmd == BIO_READ) {
							ata_28bit_cmd(ataio, ATA_READ_DMA,
							    0, lba, count);
						} else {
							ata_28bit_cmd(ataio, ATA_WRITE_DMA,
							    0, lba, count);
						}
					} else {
						if (bp->bio_cmd == BIO_READ) {
							ata_28bit_cmd(ataio, ATA_READ_MUL,
							    0, lba, count);
						} else {
							ata_28bit_cmd(ataio, ATA_WRITE_MUL,
							    0, lba, count);
						}
					}
				}
			}
				break;
			case BIO_FLUSH:
				cam_fill_ataio(ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

				if (softc->flags & ADA_FLAG_CAN_48BIT)
					ata_48bit_cmd(ataio, ATA_FLUSHCACHE48, 0, 0, 0);
				else
					ata_28bit_cmd(ataio, ATA_FLUSHCACHE, 0, 0, 0);
				break;
			}
			start_ccb->ccb_h.ccb_state = ADA_CCB_BUFFER_IO;
			start_ccb->ccb_h.ccb_bp = bp;
			softc->outstanding_cmds++;
			xpt_action(start_ccb);
			bp = bioq_first(&softc->bio_queue);
		}

		if (bp != NULL) {
			/* Have more work to do, so ensure we stay scheduled */
			xpt_schedule(periph, CAM_PRIORITY_NORMAL);
		}
		break;
	}
	}
}

/*
 * CCB completion handler: finish buffer I/O (with retry/invalidate
 * handling on error), wake immediate-CCB waiters, and ignore polled
 * dump CCBs.
 */
static void
adadone(struct cam_periph *periph, union ccb *done_ccb)
{
	struct ada_softc *softc;
	struct ccb_ataio *ataio;

	softc = (struct ada_softc *)periph->softc;
	ataio = &done_ccb->ataio;
	switch (ataio->ccb_h.ccb_state & ADA_CCB_TYPE_MASK) {
	case ADA_CCB_BUFFER_IO:
	{
		struct bio *bp;

		bp = (struct bio *)done_ccb->ccb_h.ccb_bp;
		if ((done_ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) {
			int error;

			error = adaerror(done_ccb, 0, 0);
			if (error == ERESTART) {
				/* A retry was scheduled, so just return. */
				return;
			}
			if (error != 0) {

				if (error == ENXIO) {
					/*
					 * Catastrophic error. Mark our pack as
					 * invalid.
					 */
					/*
					 * XXX See if this is really a media
					 * XXX change first?
					 */
					xpt_print(periph->path,
					    "Invalidating pack\n");
					softc->flags |= ADA_FLAG_PACK_INVALID;
				}

				/*
				 * return all queued I/O with EIO, so that
				 * the client can retry these I/Os in the
				 * proper order should it attempt to recover.
				 */
				bioq_flush(&softc->bio_queue, NULL, EIO);
				bp->bio_error = error;
				bp->bio_resid = bp->bio_bcount;
				bp->bio_flags |= BIO_ERROR;
			} else {
				bp->bio_resid = ataio->resid;
				bp->bio_error = 0;
				if (bp->bio_resid != 0)
					bp->bio_flags |= BIO_ERROR;
			}
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				cam_release_devq(done_ccb->ccb_h.path,
						 /*relsim_flags*/0,
						 /*reduction*/0,
						 /*timeout*/0,
						 /*getcount_only*/0);
		} else {
			if ((done_ccb->ccb_h.status & CAM_DEV_QFRZN) != 0)
				panic("REQ_CMP with QFRZN");
			bp->bio_resid = ataio->resid;
			if (ataio->resid > 0)
				bp->bio_flags |= BIO_ERROR;
		}
		softc->outstanding_cmds--;
		if (softc->outstanding_cmds == 0)
			softc->flags |= ADA_FLAG_WENT_IDLE;

		biodone(bp);
		break;
	}
	case ADA_CCB_WAITING:
	{
		/* Caller will release the CCB */
		wakeup(&done_ccb->ccb_h.cbfcnp);
		return;
	}
	case ADA_CCB_DUMP:
		/* No-op.  We're polling */
		return;
	default:
		break;
	}
	xpt_release_ccb(done_ccb);
}

/* Thin wrapper delegating error recovery to cam_periph_error(). */
static int
adaerror(union ccb *ccb, u_int32_t cam_flags, u_int32_t sense_flags)
{
	struct ada_softc	  *softc;
	struct cam_periph *periph;

	periph = xpt_path_periph(ccb->ccb_h.path);
	softc = (struct ada_softc *)periph->softc;

	return(cam_periph_error(ccb, cam_flags, sense_flags,
				&softc->saved_ccb));
}

/*
 * Fill softc->params from IDENTIFY data, preferring 48-bit LBA size,
 * then 28-bit LBA size, then the (possibly current) CHS geometry.
 */
static void
adagetparams(struct cam_periph *periph, struct ccb_getdev *cgd)
{
	struct ada_softc *softc = (struct ada_softc *)periph->softc;
	struct disk_params *dp = &softc->params;
	u_int64_t lbasize48;
	u_int32_t lbasize;

	dp->secsize = ata_logical_sector_size(&cgd->ident_data);
	if ((cgd->ident_data.atavalid & ATA_FLAG_54_58) &&
		cgd->ident_data.current_heads && cgd->ident_data.current_sectors) {
		dp->heads = cgd->ident_data.current_heads;
		dp->secs_per_track = cgd->ident_data.current_sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = (u_int32_t)cgd->ident_data.current_size_1 |
			  ((u_int32_t)cgd->ident_data.current_size_2 << 16);
	} else {
		dp->heads = cgd->ident_data.heads;
		dp->secs_per_track = cgd->ident_data.sectors;
		dp->cylinders = cgd->ident_data.cylinders;
		dp->sectors = cgd->ident_data.cylinders * dp->heads * dp->secs_per_track;  
	}
	lbasize = (u_int32_t)cgd->ident_data.lba_size_1 |
		  ((u_int32_t)cgd->ident_data.lba_size_2 << 16);

	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
	if (cgd->ident_data.cylinders == 16383 || dp->sectors < lbasize)
		dp->sectors = lbasize;

	/* use the 48bit LBA size if valid */
	lbasize48 = ((u_int64_t)cgd->ident_data.lba_size48_1) |
		    ((u_int64_t)cgd->ident_data.lba_size48_2 << 16) |
		    ((u_int64_t)cgd->ident_data.lba_size48_3 << 32) |
		    ((u_int64_t)cgd->ident_data.lba_size48_4 << 48);
	if ((cgd->ident_data.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	    lbasize48 > ATA_MAX_28BIT_LBA)
		dp->sectors = lbasize48;
}

/*
 * Periodic callout: request an ordered tag if no ordered tag was sent
 * in the last interval and the device has not gone idle; then re-arm.
 */
static void
adasendorderedtag(void *arg)
{
	struct ada_softc *softc = arg;

	if (ada_send_ordered) {
		if ((softc->ordered_tag_count == 0) 
		 && ((softc->flags & ADA_FLAG_WENT_IDLE) == 0)) {
			softc->flags |= ADA_FLAG_NEED_OTAG;
		}
		if (softc->outstanding_cmds > 0)
			softc->flags &= ~ADA_FLAG_WENT_IDLE;

		softc->ordered_tag_count = 0;
	}
	/* Queue us up again */
	callout_reset(&softc->sendordered_c,
	    (ADA_DEFAULT_TIMEOUT * hz) / ADA_ORDEREDTAG_INTERVAL,
	    adasendorderedtag, softc);
}

/*
 * Step through all ADA peripheral drivers, and if the device is still open,
 * sync the disk cache to physical media.
 */
static void
adashutdown(void * arg, int howto)
{
	struct cam_periph *periph;
	struct ada_softc *softc;

	TAILQ_FOREACH(periph, &adadriver.units, unit_links) {
		union ccb ccb;

		/* If we paniced with lock held - not recurse here. */
		if (cam_periph_owned(periph))
			continue;
		cam_periph_lock(periph);
		softc = (struct ada_softc *)periph->softc;
		/*
		 * We only sync the cache if the drive is still open, and
		 * if the drive is capable of it..
		 */
		if (((softc->flags & ADA_FLAG_OPEN) == 0) ||
		    (softc->flags & ADA_FLAG_CAN_FLUSHCACHE) == 0) {
			cam_periph_unlock(periph);
			continue;
		}

		xpt_setup_ccb(&ccb.ccb_h, periph->path, CAM_PRIORITY_NORMAL);

		ccb.ccb_h.ccb_state = ADA_CCB_DUMP;
		cam_fill_ataio(&ccb.ataio,
				    1,
				    adadone,
				    CAM_DIR_NONE,
				    0,
				    NULL,
				    0,
				    ada_default_timeout*1000);

		if (softc->flags & ADA_FLAG_CAN_48BIT)
			ata_48bit_cmd(&ccb.ataio, ATA_FLUSHCACHE48, 0, 0, 0);
		else
			ata_28bit_cmd(&ccb.ataio, ATA_FLUSHCACHE, 0, 0, 0);
		/* Interrupts may be off at shutdown; use the polled path. */
		xpt_polled_action(&ccb);

		if ((ccb.ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			xpt_print(periph->path, "Synchronize cache failed\n");

		if ((ccb.ccb_h.status & CAM_DEV_QFRZN) != 0)
			cam_release_devq(ccb.ccb_h.path,
					 /*relsim_flags*/0,
					 /*reduction*/0,
					 /*timeout*/0,
					 /*getcount_only*/0);
		cam_periph_unlock(periph);
	}
}

#endif /* _KERNEL */