1 /*- 2 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_ata.h" 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/ata.h> 34 #include <sys/kernel.h> 35 #include <sys/module.h> 36 #include <sys/endian.h> 37 #include <sys/ctype.h> 38 #include <sys/conf.h> 39 #include <sys/bus.h> 40 #include <sys/bio.h> 41 #include <sys/malloc.h> 42 #include <sys/sysctl.h> 43 #include <sys/sema.h> 44 #include <sys/taskqueue.h> 45 #include <vm/uma.h> 46 #include <machine/stdarg.h> 47 #include <machine/resource.h> 48 #include <machine/bus.h> 49 #include <sys/rman.h> 50 #include <dev/ata/ata-all.h> 51 #include <dev/pci/pcivar.h> 52 #include <ata_if.h> 53 54 #ifdef ATA_CAM 55 #include <cam/cam.h> 56 #include <cam/cam_ccb.h> 57 #include <cam/cam_sim.h> 58 #include <cam/cam_xpt_sim.h> 59 #include <cam/cam_debug.h> 60 #endif 61 62 #ifndef ATA_CAM 63 /* device structure */ 64 static d_ioctl_t ata_ioctl; 65 static struct cdevsw ata_cdevsw = { 66 .d_version = D_VERSION, 67 .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */ 68 .d_ioctl = ata_ioctl, 69 .d_name = "ata", 70 }; 71 #endif 72 73 /* prototypes */ 74 #ifndef ATA_CAM 75 static void ata_boot_attach(void); 76 static device_t ata_add_child(device_t, struct ata_device *, int); 77 #else 78 static void ataaction(struct cam_sim *sim, union ccb *ccb); 79 static void atapoll(struct cam_sim *sim); 80 #endif 81 static void ata_conn_event(void *, int); 82 #ifndef ATA_CAM 83 static void bswap(int8_t *, int); 84 static void btrim(int8_t *, int); 85 static void bpack(int8_t *, int8_t *, int); 86 #endif 87 static void ata_interrupt_locked(void *data); 88 #ifdef ATA_CAM 89 static void ata_periodic_poll(void *data); 90 #endif 91 92 /* global vars */ 93 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer"); 94 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL; 95 #ifndef ATA_CAM 96 struct intr_config_hook *ata_delayed_attach = NULL; 97 #endif 98 devclass_t ata_devclass; 99 uma_zone_t ata_request_zone; 100 uma_zone_t ata_composite_zone; 101 #ifndef ATA_CAM 102 int ata_wc = 1; 103 int ata_setmax = 
0;
#endif
int ata_dma_check_80pin = 1;

/* local vars */
#ifndef ATA_CAM
static int ata_dma = 1;
static int atapi_dma = 1;
#endif

/* sysctl vars */
static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
#ifndef ATA_CAM
TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
    "ATA disk DMA mode control");
#endif
TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
    CTLFLAG_RW, &ata_dma_check_80pin, 1,
    "Check for 80pin cable before setting ATA DMA mode");
#ifndef ATA_CAM
TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
    "ATAPI device DMA mode control");
TUNABLE_INT("hw.ata.wc", &ata_wc);
SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
    "ATA disk write caching");
TUNABLE_INT("hw.ata.setmax", &ata_setmax);
SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
    "ATA disk set max native address");
#endif
#ifdef ATA_CAM
FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");
#endif

/*
 * newbus device interface related functions
 */
int
ata_probe(device_t dev)
{
    return 0;
}

int
ata_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error, rid;
#ifdef ATA_CAM
    struct cam_devq *devq;
    const char *res;
    char buf[64];
    int i, mode;
#endif

    /* check that we have a virgin channel to attach */
    if (ch->r_irq)
        return EEXIST;

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    bzero(&ch->state_mtx, sizeof(struct mtx));
    mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
    bzero(&ch->queue_mtx, sizeof(struct mtx));
    mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
    TAILQ_INIT(&ch->ata_queue);
    TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
#ifdef ATA_CAM
    for (i = 0; i < 16; i++) {
        ch->user[i].mode = 0;
        snprintf(buf, sizeof(buf), "dev%d.mode", i);
        if (resource_string_value(device_get_name(dev),
            device_get_unit(dev), buf, &res) == 0)
            mode = ata_str2mode(res);
        else if (resource_string_value(device_get_name(dev),
            device_get_unit(dev), "mode", &res) == 0)
            mode = ata_str2mode(res);
        else
            mode = -1;
        if (mode >= 0)
            ch->user[i].mode = mode;
        if (ch->flags & ATA_SATA)
            ch->user[i].bytecount = 8192;
        else
            ch->user[i].bytecount = MAXPHYS;
        ch->user[i].caps = 0;
        ch->curr[i] = ch->user[i];
        if (ch->pm_level > 0)
            ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
        if (ch->pm_level > 1)
            ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
    }
    callout_init(&ch->poll_callout, 1);
#endif
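    /*
     * Note on the dev%d.mode / mode hints parsed above: they correspond
     * to kernel environment hints of the form hint.ata.<unit>.devN.mode
     * (or a channel-wide hint.ata.<unit>.mode), typically set in
     * /boot/device.hints or loader.conf(5).  Illustrative example:
     * hint.ata.0.dev0.mode="UDMA100" would be converted by
     * ata_str2mode() below and used as the initial transfer mode for
     * target 0 on ata0.  The legacy (non-ATA_CAM) path reads the same
     * hints in ata_getparam().
     */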

#ifndef ATA_CAM
    /* reset the controller HW, the channel and device(s) */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
        pause("ataatch", 1);
    ATA_RESET(dev);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);
#endif

    /* allocate DMA resources if DMA HW present */
    if (ch->dma.alloc)
        ch->dma.alloc(dev);

    /* setup interrupt delivery */
    rid = ATA_IRQ_RID;
    ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        RF_SHAREABLE | RF_ACTIVE);
    if (!ch->r_irq) {
        device_printf(dev, "unable to allocate interrupt\n");
        return ENXIO;
    }
    if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
        ata_interrupt, ch, &ch->ih))) {
        bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
        device_printf(dev, "unable to setup interrupt\n");
        return error;
    }

#ifndef ATA_CAM
    /* probe and attach devices on this channel unless we are in early boot */
    if (!ata_delayed_attach)
        ata_identify(dev);
    return (0);
#else
    if (ch->flags & ATA_PERIODIC_POLL)
        callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
    mtx_lock(&ch->state_mtx);
    /* Create the device queue for our SIM. */
    devq = cam_simq_alloc(1);
    if (devq == NULL) {
        device_printf(dev, "Unable to allocate simq\n");
        error = ENOMEM;
        goto err1;
    }
    /* Construct SIM entry */
    ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
        device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
    if (ch->sim == NULL) {
        device_printf(dev, "unable to allocate sim\n");
        cam_simq_free(devq);
        error = ENOMEM;
        goto err1;
    }
    if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
        device_printf(dev, "unable to register xpt bus\n");
        error = ENXIO;
        goto err2;
    }
    if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        device_printf(dev, "unable to create path\n");
        error = ENXIO;
        goto err3;
    }
    mtx_unlock(&ch->state_mtx);
    return (0);

err3:
    xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
    cam_sim_free(ch->sim, /*free_devq*/TRUE);
    ch->sim = NULL;
err1:
    bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
    mtx_unlock(&ch->state_mtx);
    if (ch->flags & ATA_PERIODIC_POLL)
        callout_drain(&ch->poll_callout);
    return (error);
#endif
}

int
ata_detach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
#ifndef ATA_CAM
    device_t *children;
    int nchildren, i;
#endif

    /* check that we have a valid channel to detach */
    if (!ch->r_irq)
        return ENXIO;

    /* grab the channel lock so no new requests get launched */
    mtx_lock(&ch->state_mtx);
    ch->state |= ATA_STALL_QUEUE;
    mtx_unlock(&ch->state_mtx);
#ifdef ATA_CAM
    if (ch->flags & ATA_PERIODIC_POLL)
        callout_drain(&ch->poll_callout);
#endif

#ifndef ATA_CAM
    /* detach & delete all children */
    if (!device_get_children(dev, &children, &nchildren)) {
        for (i = 0; i < nchildren; i++)
            if (children[i])
                device_delete_child(dev, children[i]);
        free(children, M_TEMP);
    }
#endif
    taskqueue_drain(taskqueue_thread, &ch->conntask);

#ifdef ATA_CAM
    mtx_lock(&ch->state_mtx);
    xpt_async(AC_LOST_DEVICE, ch->path, NULL);
    xpt_free_path(ch->path);
    xpt_bus_deregister(cam_sim_path(ch->sim));
    cam_sim_free(ch->sim, /*free_devq*/TRUE);
    ch->sim = NULL;
    mtx_unlock(&ch->state_mtx);
#endif

    /* release resources */
    bus_teardown_intr(dev, ch->r_irq, ch->ih);
    bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
    ch->r_irq = NULL;

    /* free DMA resources if DMA HW present */
    if (ch->dma.free)
        ch->dma.free(dev);

    mtx_destroy(&ch->state_mtx);
    mtx_destroy(&ch->queue_mtx);
    return 0;
}
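
/*
 * ata_conn_event() below runs from taskqueue_thread via ch->conntask,
 * typically enqueued by controller interrupt handlers on SATA
 * connect/disconnect events.  In the ATA_CAM case it reinitializes the
 * channel and asks CAM to rescan the bus; in the legacy case it simply
 * reinitializes the channel.
 */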

static void
ata_conn_event(void *context, int dummy)
{
    device_t dev = (device_t)context;
#ifdef ATA_CAM
    struct ata_channel *ch = device_get_softc(dev);
    union ccb *ccb;

    mtx_lock(&ch->state_mtx);
    if (ch->sim == NULL) {
        mtx_unlock(&ch->state_mtx);
        return;
    }
    ata_reinit(dev);
    if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
        return;
    if (xpt_create_path(&ccb->ccb_h.path, NULL,
        cam_sim_path(ch->sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_free_ccb(ccb);
        return;
    }
    xpt_rescan(ccb);
    mtx_unlock(&ch->state_mtx);
#else
    ata_reinit(dev);
#endif
}

int
ata_reinit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;
#ifndef ATA_CAM
    device_t *children;
    int nchildren, i;

    /* check that we have a valid channel to reinit */
    if (!ch || !ch->r_irq)
        return ENXIO;

    if (bootverbose)
        device_printf(dev, "reiniting channel ..\n");

    /* poll for locking the channel */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
        pause("atarini", 1);

    /* catch any request currently in ch->running */
    mtx_lock(&ch->state_mtx);
    if (ch->state & ATA_STALL_QUEUE) {
        /* Recursive reinits and reinits during detach prohibited. */
        mtx_unlock(&ch->state_mtx);
        return (ENXIO);
    }
    if ((request = ch->running))
        callout_stop(&request->callout);
    ch->running = NULL;

    /* unconditionally grab the channel lock */
    ch->state |= ATA_STALL_QUEUE;
    mtx_unlock(&ch->state_mtx);

    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);

    /* reinit the children and delete any that fail */
    if (!device_get_children(dev, &children, &nchildren)) {
        mtx_lock(&Giant); /* newbus suckage it needs Giant */
        for (i = 0; i < nchildren; i++) {
            /* did any children go missing? */
            if (children[i] && device_is_attached(children[i]) &&
                ATA_REINIT(children[i])) {
                /*
                 * if we had a running request and its device matches
                 * this child we need to inform the request that the
                 * device is gone.
                 */
                if (request && request->dev == children[i]) {
                    request->result = ENXIO;
                    device_printf(request->dev, "FAILURE - device detached\n");

                    /* if not timeout finish request here */
                    if (!(request->flags & ATA_R_TIMEOUT))
                        ata_finish(request);
                    request = NULL;
                }
                device_delete_child(dev, children[i]);
            }
        }
        free(children, M_TEMP);
        mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
    }

    /* if we still have a good request put it on the queue again */
    if (request && !(request->flags & ATA_R_TIMEOUT)) {
        device_printf(request->dev,
            "WARNING - %s requeued due to channel reset",
            ata_cmd2str(request));
        if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
            printf(" LBA=%ju", request->u.ata.lba);
        printf("\n");
        request->flags |= ATA_R_REQUEUE;
        ata_queue_request(request);
    }
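    /*
     * Note: a request that was interrupted by the reset (and that has
     * not already timed out) is put back on the queue with
     * ATA_R_REQUEUE above, so callers normally see it retried rather
     * than failed.
     */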

    /* we're done, release the channel for new work */
    mtx_lock(&ch->state_mtx);
    ch->state = ATA_IDLE;
    mtx_unlock(&ch->state_mtx);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    /* Add new children. */
    /* ata_identify(dev); */

    if (bootverbose)
        device_printf(dev, "reinit done ..\n");

    /* kick off requests on the queue */
    ata_start(dev);
#else
    xpt_freeze_simq(ch->sim, 1);
    if ((request = ch->running)) {
        ch->running = NULL;
        if (ch->state == ATA_ACTIVE)
            ch->state = ATA_IDLE;
        callout_stop(&request->callout);
        if (ch->dma.unload)
            ch->dma.unload(request);
        request->result = ERESTART;
        ata_cam_end_transaction(dev, request);
    }
    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);
    /* Tell the XPT about the event */
    xpt_async(AC_BUS_RESET, ch->path, NULL);
    xpt_release_simq(ch->sim, TRUE);
#endif
    return (0);
}

int
ata_suspend(device_t dev)
{
    struct ata_channel *ch;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
        return ENXIO;

#ifdef ATA_CAM
    if (ch->flags & ATA_PERIODIC_POLL)
        callout_drain(&ch->poll_callout);
    mtx_lock(&ch->state_mtx);
    xpt_freeze_simq(ch->sim, 1);
    while (ch->state != ATA_IDLE)
        msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
    mtx_unlock(&ch->state_mtx);
#else
    /* wait for the channel to be IDLE or detached before suspending */
    while (ch->r_irq) {
        mtx_lock(&ch->state_mtx);
        if (ch->state == ATA_IDLE) {
            ch->state = ATA_ACTIVE;
            mtx_unlock(&ch->state_mtx);
            break;
        }
        mtx_unlock(&ch->state_mtx);
        tsleep(ch, PRIBIO, "atasusp", hz/10);
    }
    ATA_LOCKING(dev, ATA_LF_UNLOCK);
#endif
    return (0);
}

int
ata_resume(device_t dev)
{
    struct ata_channel *ch;
    int error;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
        return ENXIO;

#ifdef ATA_CAM
    mtx_lock(&ch->state_mtx);
    error = ata_reinit(dev);
    xpt_release_simq(ch->sim, TRUE);
    mtx_unlock(&ch->state_mtx);
    if (ch->flags & ATA_PERIODIC_POLL)
        callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
#else
    /* reinit the devices, we don't know what mode/state they are in */
    error = ata_reinit(dev);
    /* kick off requests on the queue */
    ata_start(dev);
#endif
    return error;
}

void
ata_interrupt(void *data)
{
#ifdef ATA_CAM
    struct ata_channel *ch = (struct ata_channel *)data;

    mtx_lock(&ch->state_mtx);
    xpt_batch_start(ch->sim);
#endif
    ata_interrupt_locked(data);
#ifdef ATA_CAM
    xpt_batch_done(ch->sim);
    mtx_unlock(&ch->state_mtx);
#endif
}
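
/*
 * In the ATA_CAM configuration the interrupt handler above takes the
 * channel state mutex itself and brackets processing with
 * xpt_batch_start()/xpt_batch_done(); in the legacy configuration
 * ata_interrupt_locked() acquires the mutex internally instead.
 */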

static void
ata_interrupt_locked(void *data)
{
    struct ata_channel *ch = (struct ata_channel *)data;
    struct ata_request *request;

#ifndef ATA_CAM
    mtx_lock(&ch->state_mtx);
#endif
    do {
        /* ignore interrupt if it's not for us */
        if (ch->hw.status && !ch->hw.status(ch->dev))
            break;

        /* do we have a running request */
        if (!(request = ch->running))
            break;

        ATA_DEBUG_RQ(request, "interrupt");

        /* safety check for the right state */
        if (ch->state == ATA_IDLE) {
            device_printf(request->dev, "interrupt on idle channel ignored\n");
            break;
        }

        /*
         * we have the HW locks, so end the transaction for this request
         * if it finishes immediately otherwise wait for next interrupt
         */
        if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
            ch->running = NULL;
            if (ch->state == ATA_ACTIVE)
                ch->state = ATA_IDLE;
#ifdef ATA_CAM
            ata_cam_end_transaction(ch->dev, request);
#else
            mtx_unlock(&ch->state_mtx);
            ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
            ata_finish(request);
#endif
            return;
        }
    } while (0);
#ifndef ATA_CAM
    mtx_unlock(&ch->state_mtx);
#endif
}

#ifdef ATA_CAM
static void
ata_periodic_poll(void *data)
{
    struct ata_channel *ch = (struct ata_channel *)data;

    callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
    ata_interrupt(ch);
}
#endif

void
ata_print_cable(device_t dev, u_int8_t *who)
{
    device_printf(dev,
        "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
}

#ifndef ATA_CAM
int
ata_check_80pin(device_t dev, int mode)
{
    struct ata_device *atadev = device_get_softc(dev);

    if (!ata_dma_check_80pin) {
        if (bootverbose)
            device_printf(dev, "Skipping 80pin cable check\n");
        return mode;
    }

    if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
        ata_print_cable(dev, "device");
        mode = ATA_UDMA2;
    }
    return mode;
}
#endif

#ifndef ATA_CAM
void
ata_setmode(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int error, mode, pmode;

    mode = atadev->mode;
    do {
        pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
        mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
        if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
            mode = ata_check_80pin(dev, mode);
    } while (pmode != mode); /* Iterate until successful negotiation. */
    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
    if (bootverbose)
        device_printf(dev, "%ssetting %s\n",
            (error) ? "FAILURE " : "", ata_mode2str(mode));
    atadev->mode = mode;
}
#endif

/*
 * device related interfaces
 */
#ifndef ATA_CAM
static int
ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int32_t flag, struct thread *td)
{
    device_t device, *children;
    struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
    int *value = (int *)data;
    int i, nchildren, error = ENOTTY;

    switch (cmd) {
    case IOCATAGMAXCHANNEL:
        /* In case we have channel 0..n this will return n+1.
*/ 682 *value = devclass_get_maxunit(ata_devclass); 683 error = 0; 684 break; 685 686 case IOCATAREINIT: 687 if (*value >= devclass_get_maxunit(ata_devclass) || 688 !(device = devclass_get_device(ata_devclass, *value)) || 689 !device_is_attached(device)) 690 return ENXIO; 691 error = ata_reinit(device); 692 break; 693 694 case IOCATAATTACH: 695 if (*value >= devclass_get_maxunit(ata_devclass) || 696 !(device = devclass_get_device(ata_devclass, *value)) || 697 !device_is_attached(device)) 698 return ENXIO; 699 error = DEVICE_ATTACH(device); 700 break; 701 702 case IOCATADETACH: 703 if (*value >= devclass_get_maxunit(ata_devclass) || 704 !(device = devclass_get_device(ata_devclass, *value)) || 705 !device_is_attached(device)) 706 return ENXIO; 707 error = DEVICE_DETACH(device); 708 break; 709 710 case IOCATADEVICES: 711 if (devices->channel >= devclass_get_maxunit(ata_devclass) || 712 !(device = devclass_get_device(ata_devclass, devices->channel)) || 713 !device_is_attached(device)) 714 return ENXIO; 715 bzero(devices->name[0], 32); 716 bzero(&devices->params[0], sizeof(struct ata_params)); 717 bzero(devices->name[1], 32); 718 bzero(&devices->params[1], sizeof(struct ata_params)); 719 if (!device_get_children(device, &children, &nchildren)) { 720 for (i = 0; i < nchildren; i++) { 721 if (children[i] && device_is_attached(children[i])) { 722 struct ata_device *atadev = device_get_softc(children[i]); 723 724 if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */ 725 strncpy(devices->name[0], 726 device_get_nameunit(children[i]), 32); 727 bcopy(&atadev->param, &devices->params[0], 728 sizeof(struct ata_params)); 729 } 730 if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */ 731 strncpy(devices->name[1], 732 device_get_nameunit(children[i]), 32); 733 bcopy(&atadev->param, &devices->params[1], 734 sizeof(struct ata_params)); 735 } 736 } 737 } 738 free(children, M_TEMP); 739 error = 0; 740 } 741 else 742 error = ENODEV; 743 break; 744 745 default: 746 if (ata_raid_ioctl_func) 747 error = ata_raid_ioctl_func(cmd, data); 748 } 749 return error; 750 } 751 #endif 752 753 #ifndef ATA_CAM 754 int 755 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data) 756 { 757 struct ata_device *atadev = device_get_softc(dev); 758 struct ata_channel *ch = device_get_softc(device_get_parent(dev)); 759 struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data; 760 struct ata_params *params = (struct ata_params *)data; 761 int *mode = (int *)data; 762 struct ata_request *request; 763 caddr_t buf; 764 int error; 765 766 switch (cmd) { 767 case IOCATAREQUEST: 768 if (ioc_request->count > 769 (ch->dma.max_iosize ? 
ch->dma.max_iosize : DFLTPHYS)) { 770 return (EFBIG); 771 } 772 if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) { 773 return ENOMEM; 774 } 775 if (!(request = ata_alloc_request())) { 776 free(buf, M_ATA); 777 return ENOMEM; 778 } 779 request->dev = atadev->dev; 780 if (ioc_request->flags & ATA_CMD_WRITE) { 781 error = copyin(ioc_request->data, buf, ioc_request->count); 782 if (error) { 783 free(buf, M_ATA); 784 ata_free_request(request); 785 return error; 786 } 787 } 788 if (ioc_request->flags & ATA_CMD_ATAPI) { 789 request->flags = ATA_R_ATAPI; 790 bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16); 791 } 792 else { 793 request->u.ata.command = ioc_request->u.ata.command; 794 request->u.ata.feature = ioc_request->u.ata.feature; 795 request->u.ata.lba = ioc_request->u.ata.lba; 796 request->u.ata.count = ioc_request->u.ata.count; 797 } 798 request->timeout = ioc_request->timeout; 799 request->data = buf; 800 request->bytecount = ioc_request->count; 801 request->transfersize = request->bytecount; 802 if (ioc_request->flags & ATA_CMD_CONTROL) 803 request->flags |= ATA_R_CONTROL; 804 if (ioc_request->flags & ATA_CMD_READ) 805 request->flags |= ATA_R_READ; 806 if (ioc_request->flags & ATA_CMD_WRITE) 807 request->flags |= ATA_R_WRITE; 808 ata_queue_request(request); 809 if (request->flags & ATA_R_ATAPI) { 810 bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense, 811 sizeof(struct atapi_sense)); 812 } 813 else { 814 ioc_request->u.ata.command = request->u.ata.command; 815 ioc_request->u.ata.feature = request->u.ata.feature; 816 ioc_request->u.ata.lba = request->u.ata.lba; 817 ioc_request->u.ata.count = request->u.ata.count; 818 } 819 ioc_request->error = request->result; 820 if (ioc_request->flags & ATA_CMD_READ) 821 error = copyout(buf, ioc_request->data, ioc_request->count); 822 else 823 error = 0; 824 free(buf, M_ATA); 825 ata_free_request(request); 826 return error; 827 828 case IOCATAGPARM: 829 ata_getparam(atadev, 0); 830 bcopy(&atadev->param, params, sizeof(struct ata_params)); 831 return 0; 832 833 case IOCATASMODE: 834 atadev->mode = *mode; 835 ata_setmode(dev); 836 return 0; 837 838 case IOCATAGMODE: 839 *mode = atadev->mode | 840 (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8); 841 return 0; 842 case IOCATASSPINDOWN: 843 atadev->spindown = *mode; 844 return 0; 845 case IOCATAGSPINDOWN: 846 *mode = atadev->spindown; 847 return 0; 848 default: 849 return ENOTTY; 850 } 851 } 852 #endif 853 854 #ifndef ATA_CAM 855 static void 856 ata_boot_attach(void) 857 { 858 struct ata_channel *ch; 859 int ctlr; 860 861 mtx_lock(&Giant); /* newbus suckage it needs Giant */ 862 863 /* kick off probe and attach on all channels */ 864 for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) { 865 if ((ch = devclass_get_softc(ata_devclass, ctlr))) { 866 ata_identify(ch->dev); 867 } 868 } 869 870 /* release the hook that got us here, we are only needed once during boot */ 871 if (ata_delayed_attach) { 872 config_intrhook_disestablish(ata_delayed_attach); 873 free(ata_delayed_attach, M_TEMP); 874 ata_delayed_attach = NULL; 875 } 876 877 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */ 878 } 879 #endif 880 881 /* 882 * misc support functions 883 */ 884 #ifndef ATA_CAM 885 static device_t 886 ata_add_child(device_t parent, struct ata_device *atadev, int unit) 887 { 888 device_t child; 889 890 if ((child = device_add_child(parent, (unit < 0) ? 
NULL : "ad", unit))) { 891 device_set_softc(child, atadev); 892 device_quiet(child); 893 atadev->dev = child; 894 atadev->max_iosize = DEV_BSIZE; 895 atadev->mode = ATA_PIO_MAX; 896 } 897 return child; 898 } 899 #endif 900 901 #ifndef ATA_CAM 902 int 903 ata_getparam(struct ata_device *atadev, int init) 904 { 905 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev)); 906 struct ata_request *request; 907 const char *res; 908 char buf[64]; 909 u_int8_t command = 0; 910 int error = ENOMEM, retries = 2, mode = -1; 911 912 if (ch->devices & (ATA_ATA_MASTER << atadev->unit)) 913 command = ATA_ATA_IDENTIFY; 914 if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit)) 915 command = ATA_ATAPI_IDENTIFY; 916 if (!command) 917 return ENXIO; 918 919 while (retries-- > 0 && error) { 920 if (!(request = ata_alloc_request())) 921 break; 922 request->dev = atadev->dev; 923 request->timeout = 1; 924 request->retries = 0; 925 request->u.ata.command = command; 926 request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT); 927 if (!bootverbose) 928 request->flags |= ATA_R_QUIET; 929 request->data = (void *)&atadev->param; 930 request->bytecount = sizeof(struct ata_params); 931 request->donecount = 0; 932 request->transfersize = DEV_BSIZE; 933 ata_queue_request(request); 934 error = request->result; 935 ata_free_request(request); 936 } 937 938 if (!error && (isprint(atadev->param.model[0]) || 939 isprint(atadev->param.model[1]))) { 940 struct ata_params *atacap = &atadev->param; 941 int16_t *ptr; 942 943 for (ptr = (int16_t *)atacap; 944 ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) { 945 *ptr = le16toh(*ptr); 946 } 947 if (!(!strncmp(atacap->model, "FX", 2) || 948 !strncmp(atacap->model, "NEC", 3) || 949 !strncmp(atacap->model, "Pioneer", 7) || 950 !strncmp(atacap->model, "SHARP", 5))) { 951 bswap(atacap->model, sizeof(atacap->model)); 952 bswap(atacap->revision, sizeof(atacap->revision)); 953 bswap(atacap->serial, sizeof(atacap->serial)); 954 } 955 btrim(atacap->model, sizeof(atacap->model)); 956 bpack(atacap->model, atacap->model, sizeof(atacap->model)); 957 btrim(atacap->revision, sizeof(atacap->revision)); 958 bpack(atacap->revision, atacap->revision, sizeof(atacap->revision)); 959 btrim(atacap->serial, sizeof(atacap->serial)); 960 bpack(atacap->serial, atacap->serial, sizeof(atacap->serial)); 961 962 if (bootverbose) 963 printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n", 964 device_get_unit(ch->dev), 965 ata_unit2str(atadev), 966 ata_mode2str(ata_pmode(atacap)), 967 ata_mode2str(ata_wmode(atacap)), 968 ata_mode2str(ata_umode(atacap)), 969 (atacap->hwres & ATA_CABLE_ID) ? 
"80":"40"); 970 971 if (init) { 972 char buffer[64]; 973 974 sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision); 975 device_set_desc_copy(atadev->dev, buffer); 976 if ((atadev->param.config & ATA_PROTO_ATAPI) && 977 (atadev->param.config != ATA_CFA_MAGIC1) && 978 (atadev->param.config != ATA_CFA_MAGIC2)) { 979 if (atapi_dma && 980 (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR && 981 ata_umode(&atadev->param) >= ATA_UDMA2) 982 atadev->mode = ATA_DMA_MAX; 983 } 984 else { 985 if (ata_dma && 986 (ata_umode(&atadev->param) > 0 || 987 ata_wmode(&atadev->param) > 0)) 988 atadev->mode = ATA_DMA_MAX; 989 } 990 snprintf(buf, sizeof(buf), "dev%d.mode", atadev->unit); 991 if (resource_string_value(device_get_name(ch->dev), 992 device_get_unit(ch->dev), buf, &res) == 0) 993 mode = ata_str2mode(res); 994 else if (resource_string_value(device_get_name(ch->dev), 995 device_get_unit(ch->dev), "mode", &res) == 0) 996 mode = ata_str2mode(res); 997 if (mode >= 0) 998 atadev->mode = mode; 999 } 1000 } 1001 else { 1002 if (!error) 1003 error = ENXIO; 1004 } 1005 return error; 1006 } 1007 #endif 1008 1009 #ifndef ATA_CAM 1010 int 1011 ata_identify(device_t dev) 1012 { 1013 struct ata_channel *ch = device_get_softc(dev); 1014 struct ata_device *atadev; 1015 device_t *children; 1016 device_t child, master = NULL; 1017 int nchildren, i, n = ch->devices; 1018 1019 if (bootverbose) 1020 device_printf(dev, "Identifying devices: %08x\n", ch->devices); 1021 1022 mtx_lock(&Giant); 1023 /* Skip existing devices. */ 1024 if (!device_get_children(dev, &children, &nchildren)) { 1025 for (i = 0; i < nchildren; i++) { 1026 if (children[i] && (atadev = device_get_softc(children[i]))) 1027 n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit); 1028 } 1029 free(children, M_TEMP); 1030 } 1031 /* Create new devices. */ 1032 if (bootverbose) 1033 device_printf(dev, "New devices: %08x\n", n); 1034 if (n == 0) { 1035 mtx_unlock(&Giant); 1036 return (0); 1037 } 1038 for (i = 0; i < ATA_PM; ++i) { 1039 if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) { 1040 int unit = -1; 1041 1042 if (!(atadev = malloc(sizeof(struct ata_device), 1043 M_ATA, M_NOWAIT | M_ZERO))) { 1044 device_printf(dev, "out of memory\n"); 1045 return ENOMEM; 1046 } 1047 atadev->unit = i; 1048 #ifdef ATA_STATIC_ID 1049 if (n & (ATA_ATA_MASTER << i)) 1050 unit = (device_get_unit(dev) << 1) + i; 1051 #endif 1052 if ((child = ata_add_child(dev, atadev, unit))) { 1053 /* 1054 * PATA slave should be identified first, to allow 1055 * device cable detection on master to work properly. 
             */
            if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
                (n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
                master = child;
                continue;
            }
            if (ata_getparam(atadev, 1)) {
                device_delete_child(dev, child);
                free(atadev, M_ATA);
            }
        }
        else
            free(atadev, M_ATA);
    }
}
if (master) {
    atadev = device_get_softc(master);
    if (ata_getparam(atadev, 1)) {
        device_delete_child(dev, master);
        free(atadev, M_ATA);
    }
}
bus_generic_probe(dev);
bus_generic_attach(dev);
mtx_unlock(&Giant);
return 0;
}
#endif

void
ata_default_registers(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* fill in the defaults from what's setup already */
    ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
    ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
    ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
    ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
    ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
    ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
    ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
    ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}
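
/*
 * ata_modify_if_48bit() below rewrites a taskfile command into its
 * 48-bit LBA variant when the transfer reaches beyond the 28-bit LBA
 * limit (or uses a sector count above 256) and the device advertises
 * ATA_SUPPORT_ADDRESS48.  On controllers flagged ATA_NO_48BIT_DMA the
 * DMA opcodes are additionally downgraded to their PIO equivalents and
 * ATA_R_DMA is cleared.
 */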

void
ata_modify_if_48bit(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_device *atadev = device_get_softc(request->dev);

    request->flags &= ~ATA_R_48BIT;

    if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
        request->u.ata.count > 256) &&
        atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

        /* translate command into 48bit version */
        switch (request->u.ata.command) {
        case ATA_READ:
            request->u.ata.command = ATA_READ48;
            break;
        case ATA_READ_MUL:
            request->u.ata.command = ATA_READ_MUL48;
            break;
        case ATA_READ_DMA:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_READ_MUL48;
                else
                    request->u.ata.command = ATA_READ48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_READ_DMA48;
            break;
        case ATA_READ_DMA_QUEUED:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_READ_MUL48;
                else
                    request->u.ata.command = ATA_READ48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_READ_DMA_QUEUED48;
            break;
        case ATA_WRITE:
            request->u.ata.command = ATA_WRITE48;
            break;
        case ATA_WRITE_MUL:
            request->u.ata.command = ATA_WRITE_MUL48;
            break;
        case ATA_WRITE_DMA:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_WRITE_MUL48;
                else
                    request->u.ata.command = ATA_WRITE48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_WRITE_DMA48;
            break;
        case ATA_WRITE_DMA_QUEUED:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_WRITE_MUL48;
                else
                    request->u.ata.command = ATA_WRITE48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
            break;
        case ATA_FLUSHCACHE:
            request->u.ata.command = ATA_FLUSHCACHE48;
            break;
        case ATA_SET_MAX_ADDRESS:
            request->u.ata.command = ATA_SET_MAX_ADDRESS48;
            break;
        default:
            return;
        }
        request->flags |= ATA_R_48BIT;
    }
    else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

        /* translate command into 48bit version */
        switch (request->u.ata.command) {
        case ATA_FLUSHCACHE:
            request->u.ata.command = ATA_FLUSHCACHE48;
            break;
        case ATA_READ_NATIVE_MAX_ADDRESS:
            request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
            break;
        case ATA_SET_MAX_ADDRESS:
            request->u.ata.command = ATA_SET_MAX_ADDRESS48;
            break;
        default:
            return;
        }
        request->flags |= ATA_R_48BIT;
    }
}

void
ata_udelay(int interval)
{
    /* for now just use DELAY, the timer/sleep subsystems are not there yet */
    if (1 || interval < (1000000/hz) || ata_delayed_attach)
        DELAY(interval);
    else
        pause("ataslp", interval/(1000000/hz));
}

#ifndef ATA_CAM
const char *
ata_unit2str(struct ata_device *atadev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
    static char str[8];

    if (ch->devices & ATA_PORTMULTIPLIER)
        sprintf(str, "port%d", atadev->unit);
    else
        sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
    return str;
}
#endif

const char *
ata_mode2str(int mode)
{
    switch (mode) {
    case -1: return "UNSUPPORTED";
    case ATA_PIO0: return "PIO0";
    case ATA_PIO1: return "PIO1";
    case ATA_PIO2: return "PIO2";
    case ATA_PIO3: return "PIO3";
    case ATA_PIO4: return "PIO4";
    case ATA_WDMA0: return "WDMA0";
    case ATA_WDMA1: return "WDMA1";
    case ATA_WDMA2: return "WDMA2";
    case ATA_UDMA0: return "UDMA16";
    case ATA_UDMA1: return "UDMA25";
    case ATA_UDMA2: return "UDMA33";
    case ATA_UDMA3: return "UDMA40";
    case ATA_UDMA4: return "UDMA66";
    case ATA_UDMA5: return "UDMA100";
    case ATA_UDMA6: return "UDMA133";
    case ATA_SA150: return "SATA150";
    case ATA_SA300: return "SATA300";
    default:
        if (mode & ATA_DMA_MASK)
            return "BIOSDMA";
        else
            return "BIOSPIO";
    }
}

int
ata_str2mode(const char *str)
{

    if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
    if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
    if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
    if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
    if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
    if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
    if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
    if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
    if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
    if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
    if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
    if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
    if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
    if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
    if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
    if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
    if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
    if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
    if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
    if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
    if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
    if (!strcasecmp(str,
"UDMA133")) return (ATA_UDMA6); 1284 return (-1); 1285 } 1286 1287 #ifndef ATA_CAM 1288 const char * 1289 ata_satarev2str(int rev) 1290 { 1291 switch (rev) { 1292 case 0: return ""; 1293 case 1: return "SATA 1.5Gb/s"; 1294 case 2: return "SATA 3Gb/s"; 1295 case 3: return "SATA 6Gb/s"; 1296 case 0xff: return "SATA"; 1297 default: return "???"; 1298 } 1299 } 1300 #endif 1301 1302 int 1303 ata_atapi(device_t dev, int target) 1304 { 1305 struct ata_channel *ch = device_get_softc(dev); 1306 1307 return (ch->devices & (ATA_ATAPI_MASTER << target)); 1308 } 1309 1310 #ifndef ATA_CAM 1311 int 1312 ata_pmode(struct ata_params *ap) 1313 { 1314 if (ap->atavalid & ATA_FLAG_64_70) { 1315 if (ap->apiomodes & 0x02) 1316 return ATA_PIO4; 1317 if (ap->apiomodes & 0x01) 1318 return ATA_PIO3; 1319 } 1320 if (ap->mwdmamodes & 0x04) 1321 return ATA_PIO4; 1322 if (ap->mwdmamodes & 0x02) 1323 return ATA_PIO3; 1324 if (ap->mwdmamodes & 0x01) 1325 return ATA_PIO2; 1326 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200) 1327 return ATA_PIO2; 1328 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100) 1329 return ATA_PIO1; 1330 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000) 1331 return ATA_PIO0; 1332 return ATA_PIO0; 1333 } 1334 #endif 1335 1336 #ifndef ATA_CAM 1337 int 1338 ata_wmode(struct ata_params *ap) 1339 { 1340 if (ap->mwdmamodes & 0x04) 1341 return ATA_WDMA2; 1342 if (ap->mwdmamodes & 0x02) 1343 return ATA_WDMA1; 1344 if (ap->mwdmamodes & 0x01) 1345 return ATA_WDMA0; 1346 return -1; 1347 } 1348 #endif 1349 1350 #ifndef ATA_CAM 1351 int 1352 ata_umode(struct ata_params *ap) 1353 { 1354 if (ap->atavalid & ATA_FLAG_88) { 1355 if (ap->udmamodes & 0x40) 1356 return ATA_UDMA6; 1357 if (ap->udmamodes & 0x20) 1358 return ATA_UDMA5; 1359 if (ap->udmamodes & 0x10) 1360 return ATA_UDMA4; 1361 if (ap->udmamodes & 0x08) 1362 return ATA_UDMA3; 1363 if (ap->udmamodes & 0x04) 1364 return ATA_UDMA2; 1365 if (ap->udmamodes & 0x02) 1366 return ATA_UDMA1; 1367 if (ap->udmamodes & 0x01) 1368 return ATA_UDMA0; 1369 } 1370 return -1; 1371 } 1372 #endif 1373 1374 #ifndef ATA_CAM 1375 int 1376 ata_limit_mode(device_t dev, int mode, int maxmode) 1377 { 1378 struct ata_device *atadev = device_get_softc(dev); 1379 1380 if (maxmode && mode > maxmode) 1381 mode = maxmode; 1382 1383 if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0) 1384 return min(mode, ata_umode(&atadev->param)); 1385 1386 if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0) 1387 return min(mode, ata_wmode(&atadev->param)); 1388 1389 if (mode > ata_pmode(&atadev->param)) 1390 return min(mode, ata_pmode(&atadev->param)); 1391 1392 return mode; 1393 } 1394 #endif 1395 1396 #ifndef ATA_CAM 1397 static void 1398 bswap(int8_t *buf, int len) 1399 { 1400 u_int16_t *ptr = (u_int16_t*)(buf + len); 1401 1402 while (--ptr >= (u_int16_t*)buf) 1403 *ptr = ntohs(*ptr); 1404 } 1405 #endif 1406 1407 #ifndef ATA_CAM 1408 static void 1409 btrim(int8_t *buf, int len) 1410 { 1411 int8_t *ptr; 1412 1413 for (ptr = buf; ptr < buf+len; ++ptr) 1414 if (!*ptr || *ptr == '_') 1415 *ptr = ' '; 1416 for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr) 1417 *ptr = 0; 1418 } 1419 #endif 1420 1421 #ifndef ATA_CAM 1422 static void 1423 bpack(int8_t *src, int8_t *dst, int len) 1424 { 1425 int i, j, blank; 1426 1427 for (i = j = blank = 0 ; i < len; i++) { 1428 if (blank && src[i] == ' ') continue; 1429 if (blank && src[i] != ' ') { 1430 dst[j++] = src[i]; 1431 blank = 0; 1432 continue; 1433 } 1434 if (src[i] == ' ') { 1435 blank = 1; 1436 if (i == 0) 1437 
continue; 1438 } 1439 dst[j++] = src[i]; 1440 } 1441 if (j < len) 1442 dst[j] = 0x00; 1443 } 1444 #endif 1445 1446 #ifdef ATA_CAM 1447 void 1448 ata_cam_begin_transaction(device_t dev, union ccb *ccb) 1449 { 1450 struct ata_channel *ch = device_get_softc(dev); 1451 struct ata_request *request; 1452 1453 if (!(request = ata_alloc_request())) { 1454 device_printf(dev, "FAILURE - out of memory in start\n"); 1455 ccb->ccb_h.status = CAM_REQ_INVALID; 1456 xpt_done(ccb); 1457 return; 1458 } 1459 bzero(request, sizeof(*request)); 1460 1461 /* setup request */ 1462 request->dev = NULL; 1463 request->parent = dev; 1464 request->unit = ccb->ccb_h.target_id; 1465 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1466 request->data = ccb->ataio.data_ptr; 1467 request->bytecount = ccb->ataio.dxfer_len; 1468 request->u.ata.command = ccb->ataio.cmd.command; 1469 request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) | 1470 (uint16_t)ccb->ataio.cmd.features; 1471 request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) | 1472 (uint16_t)ccb->ataio.cmd.sector_count; 1473 if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) { 1474 request->flags |= ATA_R_48BIT; 1475 request->u.ata.lba = 1476 ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) | 1477 ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) | 1478 ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24); 1479 } else { 1480 request->u.ata.lba = 1481 ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24); 1482 } 1483 request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) | 1484 ((uint64_t)ccb->ataio.cmd.lba_mid << 8) | 1485 (uint64_t)ccb->ataio.cmd.lba_low; 1486 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1487 ccb->ataio.cmd.flags & CAM_ATAIO_DMA) 1488 request->flags |= ATA_R_DMA; 1489 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1490 request->flags |= ATA_R_READ; 1491 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) 1492 request->flags |= ATA_R_WRITE; 1493 } else { 1494 request->data = ccb->csio.data_ptr; 1495 request->bytecount = ccb->csio.dxfer_len; 1496 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
1497 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, 1498 request->u.atapi.ccb, ccb->csio.cdb_len); 1499 request->flags |= ATA_R_ATAPI; 1500 if (ch->curr[ccb->ccb_h.target_id].atapi == 16) 1501 request->flags |= ATA_R_ATAPI16; 1502 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1503 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) 1504 request->flags |= ATA_R_DMA; 1505 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1506 request->flags |= ATA_R_READ; 1507 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) 1508 request->flags |= ATA_R_WRITE; 1509 } 1510 request->transfersize = min(request->bytecount, 1511 ch->curr[ccb->ccb_h.target_id].bytecount); 1512 request->retries = 0; 1513 request->timeout = (ccb->ccb_h.timeout + 999) / 1000; 1514 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); 1515 request->ccb = ccb; 1516 1517 ch->running = request; 1518 ch->state = ATA_ACTIVE; 1519 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { 1520 ch->running = NULL; 1521 ch->state = ATA_IDLE; 1522 ata_cam_end_transaction(dev, request); 1523 return; 1524 } 1525 } 1526 1527 static void 1528 ata_cam_request_sense(device_t dev, struct ata_request *request) 1529 { 1530 struct ata_channel *ch = device_get_softc(dev); 1531 union ccb *ccb = request->ccb; 1532 1533 ch->requestsense = 1; 1534 1535 bzero(request, sizeof(*request)); 1536 request->dev = NULL; 1537 request->parent = dev; 1538 request->unit = ccb->ccb_h.target_id; 1539 request->data = (void *)&ccb->csio.sense_data; 1540 request->bytecount = ccb->csio.sense_len; 1541 request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE; 1542 request->u.atapi.ccb[4] = ccb->csio.sense_len; 1543 request->flags |= ATA_R_ATAPI; 1544 if (ch->curr[ccb->ccb_h.target_id].atapi == 16) 1545 request->flags |= ATA_R_ATAPI16; 1546 if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) 1547 request->flags |= ATA_R_DMA; 1548 request->flags |= ATA_R_READ; 1549 request->transfersize = min(request->bytecount, 1550 ch->curr[ccb->ccb_h.target_id].bytecount); 1551 request->retries = 0; 1552 request->timeout = (ccb->ccb_h.timeout + 999) / 1000; 1553 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); 1554 request->ccb = ccb; 1555 1556 ch->running = request; 1557 ch->state = ATA_ACTIVE; 1558 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { 1559 ch->running = NULL; 1560 ch->state = ATA_IDLE; 1561 ata_cam_end_transaction(dev, request); 1562 return; 1563 } 1564 } 1565 1566 static void 1567 ata_cam_process_sense(device_t dev, struct ata_request *request) 1568 { 1569 struct ata_channel *ch = device_get_softc(dev); 1570 union ccb *ccb = request->ccb; 1571 int fatalerr = 0; 1572 1573 ch->requestsense = 0; 1574 1575 if (request->flags & ATA_R_TIMEOUT) 1576 fatalerr = 1; 1577 if ((request->flags & ATA_R_TIMEOUT) == 0 && 1578 (request->status & ATA_S_ERROR) == 0 && 1579 request->result == 0) { 1580 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 1581 } else { 1582 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1583 ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 1584 } 1585 1586 ata_free_request(request); 1587 xpt_done(ccb); 1588 /* Do error recovery if needed. 
*/ 1589 if (fatalerr) 1590 ata_reinit(dev); 1591 } 1592 1593 void 1594 ata_cam_end_transaction(device_t dev, struct ata_request *request) 1595 { 1596 struct ata_channel *ch = device_get_softc(dev); 1597 union ccb *ccb = request->ccb; 1598 int fatalerr = 0; 1599 1600 if (ch->requestsense) { 1601 ata_cam_process_sense(dev, request); 1602 return; 1603 } 1604 1605 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1606 if (request->flags & ATA_R_TIMEOUT) { 1607 xpt_freeze_simq(ch->sim, 1); 1608 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1609 ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ; 1610 fatalerr = 1; 1611 } else if (request->status & ATA_S_ERROR) { 1612 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1613 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; 1614 } else { 1615 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1616 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1617 } 1618 } else if (request->result == ERESTART) 1619 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1620 else if (request->result != 0) 1621 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1622 else 1623 ccb->ccb_h.status |= CAM_REQ_CMP; 1624 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && 1625 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 1626 xpt_freeze_devq(ccb->ccb_h.path, 1); 1627 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1628 } 1629 if (ccb->ccb_h.func_code == XPT_ATA_IO && 1630 ((request->status & ATA_S_ERROR) || 1631 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) { 1632 struct ata_res *res = &ccb->ataio.res; 1633 res->status = request->status; 1634 res->error = request->error; 1635 res->lba_low = request->u.ata.lba; 1636 res->lba_mid = request->u.ata.lba >> 8; 1637 res->lba_high = request->u.ata.lba >> 16; 1638 res->device = request->u.ata.lba >> 24; 1639 res->lba_low_exp = request->u.ata.lba >> 24; 1640 res->lba_mid_exp = request->u.ata.lba >> 32; 1641 res->lba_high_exp = request->u.ata.lba >> 40; 1642 res->sector_count = request->u.ata.count; 1643 res->sector_count_exp = request->u.ata.count >> 8; 1644 } 1645 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1646 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1647 ccb->ataio.resid = 1648 ccb->ataio.dxfer_len - request->donecount; 1649 } else { 1650 ccb->csio.resid = 1651 ccb->csio.dxfer_len - request->donecount; 1652 } 1653 } 1654 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && 1655 (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) 1656 ata_cam_request_sense(dev, request); 1657 else { 1658 ata_free_request(request); 1659 xpt_done(ccb); 1660 } 1661 /* Do error recovery if needed. */ 1662 if (fatalerr) 1663 ata_reinit(dev); 1664 } 1665 1666 static int 1667 ata_check_ids(device_t dev, union ccb *ccb) 1668 { 1669 struct ata_channel *ch = device_get_softc(dev); 1670 1671 if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 
0 : 1)) { 1672 ccb->ccb_h.status = CAM_TID_INVALID; 1673 xpt_done(ccb); 1674 return (-1); 1675 } 1676 if (ccb->ccb_h.target_lun != 0) { 1677 ccb->ccb_h.status = CAM_LUN_INVALID; 1678 xpt_done(ccb); 1679 return (-1); 1680 } 1681 return (0); 1682 } 1683 1684 static void 1685 ataaction(struct cam_sim *sim, union ccb *ccb) 1686 { 1687 device_t dev, parent; 1688 struct ata_channel *ch; 1689 1690 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n", 1691 ccb->ccb_h.func_code)); 1692 1693 ch = (struct ata_channel *)cam_sim_softc(sim); 1694 dev = ch->dev; 1695 switch (ccb->ccb_h.func_code) { 1696 /* Common cases first */ 1697 case XPT_ATA_IO: /* Execute the requested I/O operation */ 1698 case XPT_SCSI_IO: 1699 if (ata_check_ids(dev, ccb)) 1700 return; 1701 if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) 1702 << ccb->ccb_h.target_id)) == 0) { 1703 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1704 break; 1705 } 1706 if (ch->running) 1707 device_printf(dev, "already running!\n"); 1708 if (ccb->ccb_h.func_code == XPT_ATA_IO && 1709 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1710 (ccb->ataio.cmd.control & ATA_A_RESET)) { 1711 struct ata_res *res = &ccb->ataio.res; 1712 1713 bzero(res, sizeof(*res)); 1714 if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) { 1715 res->lba_high = 0; 1716 res->lba_mid = 0; 1717 } else { 1718 res->lba_high = 0xeb; 1719 res->lba_mid = 0x14; 1720 } 1721 ccb->ccb_h.status = CAM_REQ_CMP; 1722 break; 1723 } 1724 ata_cam_begin_transaction(dev, ccb); 1725 return; 1726 case XPT_EN_LUN: /* Enable LUN as a target */ 1727 case XPT_TARGET_IO: /* Execute target I/O request */ 1728 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 1729 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ 1730 case XPT_ABORT: /* Abort the specified CCB */ 1731 /* XXX Implement */ 1732 ccb->ccb_h.status = CAM_REQ_INVALID; 1733 break; 1734 case XPT_SET_TRAN_SETTINGS: 1735 { 1736 struct ccb_trans_settings *cts = &ccb->cts; 1737 struct ata_cam_device *d; 1738 1739 if (ata_check_ids(dev, ccb)) 1740 return; 1741 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 1742 d = &ch->curr[ccb->ccb_h.target_id]; 1743 else 1744 d = &ch->user[ccb->ccb_h.target_id]; 1745 if (ch->flags & ATA_SATA) { 1746 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) 1747 d->revision = cts->xport_specific.sata.revision; 1748 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) { 1749 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 1750 d->mode = ATA_SETMODE(ch->dev, 1751 ccb->ccb_h.target_id, 1752 cts->xport_specific.sata.mode); 1753 } else 1754 d->mode = cts->xport_specific.sata.mode; 1755 } 1756 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) 1757 d->bytecount = min(8192, cts->xport_specific.sata.bytecount); 1758 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) 1759 d->atapi = cts->xport_specific.sata.atapi; 1760 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) 1761 d->caps = cts->xport_specific.sata.caps; 1762 } else { 1763 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) { 1764 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 1765 d->mode = ATA_SETMODE(ch->dev, 1766 ccb->ccb_h.target_id, 1767 cts->xport_specific.ata.mode); 1768 } else 1769 d->mode = cts->xport_specific.ata.mode; 1770 } 1771 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) 1772 d->bytecount = cts->xport_specific.ata.bytecount; 1773 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI) 1774 d->atapi = cts->xport_specific.ata.atapi; 1775 } 1776 
ccb->ccb_h.status = CAM_REQ_CMP; 1777 break; 1778 } 1779 case XPT_GET_TRAN_SETTINGS: 1780 { 1781 struct ccb_trans_settings *cts = &ccb->cts; 1782 struct ata_cam_device *d; 1783 1784 if (ata_check_ids(dev, ccb)) 1785 return; 1786 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 1787 d = &ch->curr[ccb->ccb_h.target_id]; 1788 else 1789 d = &ch->user[ccb->ccb_h.target_id]; 1790 cts->protocol = PROTO_UNSPECIFIED; 1791 cts->protocol_version = PROTO_VERSION_UNSPECIFIED; 1792 if (ch->flags & ATA_SATA) { 1793 cts->transport = XPORT_SATA; 1794 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 1795 cts->xport_specific.sata.valid = 0; 1796 cts->xport_specific.sata.mode = d->mode; 1797 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; 1798 cts->xport_specific.sata.bytecount = d->bytecount; 1799 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; 1800 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 1801 cts->xport_specific.sata.revision = 1802 ATA_GETREV(dev, ccb->ccb_h.target_id); 1803 if (cts->xport_specific.sata.revision != 0xff) { 1804 cts->xport_specific.sata.valid |= 1805 CTS_SATA_VALID_REVISION; 1806 } 1807 cts->xport_specific.sata.caps = 1808 d->caps & CTS_SATA_CAPS_D; 1809 if (ch->pm_level) { 1810 cts->xport_specific.sata.caps |= 1811 CTS_SATA_CAPS_H_PMREQ; 1812 } 1813 cts->xport_specific.sata.caps &= 1814 ch->user[ccb->ccb_h.target_id].caps; 1815 cts->xport_specific.sata.valid |= 1816 CTS_SATA_VALID_CAPS; 1817 } else { 1818 cts->xport_specific.sata.revision = d->revision; 1819 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; 1820 cts->xport_specific.sata.caps = d->caps; 1821 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 1822 } 1823 cts->xport_specific.sata.atapi = d->atapi; 1824 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; 1825 } else { 1826 cts->transport = XPORT_ATA; 1827 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 1828 cts->xport_specific.ata.valid = 0; 1829 cts->xport_specific.ata.mode = d->mode; 1830 cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE; 1831 cts->xport_specific.ata.bytecount = d->bytecount; 1832 cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT; 1833 cts->xport_specific.ata.atapi = d->atapi; 1834 cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI; 1835 } 1836 ccb->ccb_h.status = CAM_REQ_CMP; 1837 break; 1838 } 1839 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 1840 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 1841 ata_reinit(dev); 1842 ccb->ccb_h.status = CAM_REQ_CMP; 1843 break; 1844 case XPT_TERM_IO: /* Terminate the I/O process */ 1845 /* XXX Implement */ 1846 ccb->ccb_h.status = CAM_REQ_INVALID; 1847 break; 1848 case XPT_PATH_INQ: /* Path routing inquiry */ 1849 { 1850 struct ccb_pathinq *cpi = &ccb->cpi; 1851 1852 parent = device_get_parent(dev); 1853 cpi->version_num = 1; /* XXX??? 
         */
        cpi->hba_inquiry = PI_SDTR_ABLE;
        cpi->target_sprt = 0;
        cpi->hba_misc = PIM_SEQSCAN;
        cpi->hba_eng_cnt = 0;
        if (ch->flags & ATA_NO_SLAVE)
            cpi->max_target = 0;
        else
            cpi->max_target = 1;
        cpi->max_lun = 0;
        cpi->initiator_id = 0;
        cpi->bus_id = cam_sim_bus(sim);
        if (ch->flags & ATA_SATA)
            cpi->base_transfer_speed = 150000;
        else
            cpi->base_transfer_speed = 3300;
        strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
        strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
        strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
        cpi->unit_number = cam_sim_unit(sim);
        if (ch->flags & ATA_SATA)
            cpi->transport = XPORT_SATA;
        else
            cpi->transport = XPORT_ATA;
        cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
        cpi->protocol = PROTO_ATA;
        cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
        cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
        if (device_get_devclass(device_get_parent(parent)) ==
            devclass_find("pci")) {
            cpi->hba_vendor = pci_get_vendor(parent);
            cpi->hba_device = pci_get_device(parent);
            cpi->hba_subvendor = pci_get_subvendor(parent);
            cpi->hba_subdevice = pci_get_subdevice(parent);
        }
        cpi->ccb_h.status = CAM_REQ_CMP;
        break;
    }
    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    }
    xpt_done(ccb);
}

static void
atapoll(struct cam_sim *sim)
{
    struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);

    ata_interrupt_locked(ch);
}
#endif

/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{
#ifndef ATA_CAM
    static struct cdev *atacdev;
#endif

    switch (what) {
    case MOD_LOAD:
#ifndef ATA_CAM
        /* register controlling device */
        atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");

        if (cold) {
            /* register boot attach to be run when interrupts are enabled */
            if (!(ata_delayed_attach = (struct intr_config_hook *)
                malloc(sizeof(struct intr_config_hook),
                    M_TEMP, M_NOWAIT | M_ZERO))) {
                printf("ata: malloc of delayed attach hook failed\n");
                return EIO;
            }
            ata_delayed_attach->ich_func = (void*)ata_boot_attach;
            if (config_intrhook_establish(ata_delayed_attach) != 0) {
                printf("ata: config_intrhook_establish failed\n");
                free(ata_delayed_attach, M_TEMP);
            }
        }
#endif
        return 0;

    case MOD_UNLOAD:
#ifndef ATA_CAM
        /* deregister controlling device */
        destroy_dev(atacdev);
#endif
        return 0;

    default:
        return EOPNOTSUPP;
    }
}

static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(ata, 1);
#ifdef ATA_CAM
MODULE_DEPEND(ata, cam, 1, 1, 1);
#endif

static void
ata_init(void)
{
    ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
        NULL, NULL, NULL, NULL, 0, 0);
    ata_composite_zone = uma_zcreate("ata_composite",
        sizeof(struct ata_composite),
        NULL, NULL, NULL, NULL, 0, 0);
}
SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);

static void
ata_uninit(void)
{
    uma_zdestroy(ata_composite_zone);
    uma_zdestroy(ata_request_zone);
}
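
/*
 * The two UMA zones created in ata_init() back ata_alloc_request() and
 * ata_free_request() as well as the composite-request allocator; they
 * are set up at SI_SUB_DRIVERS time and torn down again by ata_uninit()
 * above when the module is unregistered below.
 */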
SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL); 1977