/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ata.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/ctype.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>
#include <ata_if.h>

#ifdef ATA_CAM
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#endif

#ifndef ATA_CAM
/* device structure */
static d_ioctl_t ata_ioctl;
static struct cdevsw ata_cdevsw = {
    .d_version =    D_VERSION,
    .d_flags =      D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
    .d_ioctl =      ata_ioctl,
    .d_name =       "ata",
};
#endif

/* prototypes */
#ifndef ATA_CAM
static void ata_boot_attach(void);
static device_t ata_add_child(device_t, struct ata_device *, int);
#else
static void ataaction(struct cam_sim *sim, union ccb *ccb);
static void atapoll(struct cam_sim *sim);
#endif
static void ata_conn_event(void *, int);
static void bswap(int8_t *, int);
static void btrim(int8_t *, int);
static void bpack(int8_t *, int8_t *, int);
static void ata_interrupt_locked(void *data);

/* global vars */
MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
struct intr_config_hook *ata_delayed_attach = NULL;
devclass_t ata_devclass;
uma_zone_t ata_request_zone;
uma_zone_t ata_composite_zone;
int ata_wc = 1;
int ata_setmax = 0;
int ata_dma_check_80pin = 1;

/* local vars */
static int ata_dma = 1;
static int atapi_dma = 1;

/* sysctl vars */
SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
    "ATA disk DMA mode control");
TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
    CTLFLAG_RDTUN, &ata_dma_check_80pin, 1,
    "Check for 80pin cable before setting ATA DMA mode");
TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
    "ATAPI device DMA mode control");
TUNABLE_INT("hw.ata.wc", &ata_wc);
SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
    "ATA disk write caching");
TUNABLE_INT("hw.ata.setmax", &ata_setmax);
SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0,
    "ATA disk set max native address");

/*
 * newbus device interface related functions
 */
int
ata_probe(device_t dev)
{
    return 0;
}

int
ata_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error, rid;
#ifdef ATA_CAM
    struct cam_devq *devq;
    int i;
#endif

    /* check that we have a virgin channel to attach */
    if (ch->r_irq)
        return EEXIST;

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    bzero(&ch->state_mtx, sizeof(struct mtx));
    mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
    bzero(&ch->queue_mtx, sizeof(struct mtx));
    mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
    TAILQ_INIT(&ch->ata_queue);
    TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
#ifdef ATA_CAM
    for (i = 0; i < 16; i++) {
        ch->user[i].mode = 0;
        if (ch->flags & ATA_SATA)
            ch->user[i].bytecount = 8192;
        else
            ch->user[i].bytecount = MAXPHYS;
        ch->curr[i] = ch->user[i];
    }
#endif

    /* reset the controller HW, the channel and device(s) */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
        pause("ataatch", 1);
#ifndef ATA_CAM
    ATA_RESET(dev);
#endif
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    /* allocate DMA resources if DMA HW present */
    if (ch->dma.alloc)
        ch->dma.alloc(dev);

    /* setup interrupt delivery */
    rid = ATA_IRQ_RID;
    ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
        RF_SHAREABLE | RF_ACTIVE);
    if (!ch->r_irq) {
        device_printf(dev, "unable to allocate interrupt\n");
        return ENXIO;
    }
    if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
        ata_interrupt, ch, &ch->ih))) {
        device_printf(dev, "unable to setup interrupt\n");
        return error;
    }

#ifndef ATA_CAM
    /* probe and attach devices on this channel unless we are in early boot */
    if (!ata_delayed_attach)
        ata_identify(dev);
    return (0);
#else
    mtx_lock(&ch->state_mtx);
    /* Create the device queue for our SIM. */
    devq = cam_simq_alloc(1);
    if (devq == NULL) {
        device_printf(dev, "Unable to allocate simq\n");
        error = ENOMEM;
        goto err1;
    }
    /* Construct SIM entry */
    ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
        device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
    if (ch->sim == NULL) {
        device_printf(dev, "unable to allocate sim\n");
        error = ENOMEM;
        goto err2;
    }
    if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
        device_printf(dev, "unable to register xpt bus\n");
        error = ENXIO;
        goto err2;
    }
    if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        device_printf(dev, "unable to create path\n");
        error = ENXIO;
        goto err3;
    }
    mtx_unlock(&ch->state_mtx);
    return (0);

err3:
    xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
    cam_sim_free(ch->sim, /*free_devq*/TRUE);
err1:
    bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
    mtx_unlock(&ch->state_mtx);
    return (error);
#endif
}

int
ata_detach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
#ifndef ATA_CAM
    device_t *children;
    int nchildren, i;
#endif

    /* check that we have a valid channel to detach */
    if (!ch->r_irq)
        return ENXIO;

    /* grab the channel lock so no new requests get launched */
    mtx_lock(&ch->state_mtx);
    ch->state |= ATA_STALL_QUEUE;
    mtx_unlock(&ch->state_mtx);

#ifndef ATA_CAM
    /* detach & delete all children */
    if (!device_get_children(dev, &children, &nchildren)) {
        for (i = 0; i < nchildren; i++)
            if (children[i])
                device_delete_child(dev, children[i]);
        free(children, M_TEMP);
    }
#endif
    taskqueue_drain(taskqueue_thread, &ch->conntask);

#ifdef ATA_CAM
    mtx_lock(&ch->state_mtx);
    xpt_async(AC_LOST_DEVICE, ch->path, NULL);
    xpt_free_path(ch->path);
    xpt_bus_deregister(cam_sim_path(ch->sim));
    cam_sim_free(ch->sim, /*free_devq*/TRUE);
    mtx_unlock(&ch->state_mtx);
#endif

    /* release resources */
    bus_teardown_intr(dev, ch->r_irq, ch->ih);
    bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
    ch->r_irq = NULL;

    /* free DMA resources if DMA HW present */
    if (ch->dma.free)
        ch->dma.free(dev);

    mtx_destroy(&ch->state_mtx);
    mtx_destroy(&ch->queue_mtx);
    return 0;
}

static void
ata_conn_event(void *context, int dummy)
{
    device_t dev = (device_t)context;
#ifdef ATA_CAM
    struct ata_channel *ch = device_get_softc(dev);
    union ccb *ccb;

    mtx_lock(&ch->state_mtx);
    ata_reinit(dev);
    mtx_unlock(&ch->state_mtx);
    if ((ccb = xpt_alloc_ccb()) == NULL)
        return;
    if (xpt_create_path(&ccb->ccb_h.path, NULL,
        cam_sim_path(ch->sim),
        CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
        xpt_free_ccb(ccb);
        return;
    }
    xpt_rescan(ccb);
#else
    ata_reinit(dev);
#endif
}

int
ata_reinit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;
#ifndef ATA_CAM
    device_t *children;
    int nchildren, i;

    /* check that we have a valid channel to reinit */
    if (!ch || !ch->r_irq)
        return ENXIO;

    if (bootverbose)
        device_printf(dev, "reiniting channel ..\n");

    /* poll for locking the channel */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
        pause("atarini", 1);

    /* catch eventual request in ch->running */
    mtx_lock(&ch->state_mtx);
    if (ch->state & ATA_STALL_QUEUE) {
        /* Recursive reinits and reinits during detach prohibited. */
        mtx_unlock(&ch->state_mtx);
        return (ENXIO);
    }
    if ((request = ch->running))
        callout_stop(&request->callout);
    ch->running = NULL;

    /* unconditionally grab the channel lock */
    ch->state |= ATA_STALL_QUEUE;
    mtx_unlock(&ch->state_mtx);

    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);

    /* reinit the children and delete any that fail */
    if (!device_get_children(dev, &children, &nchildren)) {
        mtx_lock(&Giant); /* newbus suckage it needs Giant */
        for (i = 0; i < nchildren; i++) {
            /* did any children go missing ? */
            if (children[i] && device_is_attached(children[i]) &&
                ATA_REINIT(children[i])) {
                /*
                 * if we had a running request and its device matches
                 * this child we need to inform the request that the
                 * device is gone.
                 */
                if (request && request->dev == children[i]) {
                    request->result = ENXIO;
                    device_printf(request->dev, "FAILURE - device detached\n");

                    /* if not timeout finish request here */
                    if (!(request->flags & ATA_R_TIMEOUT))
                        ata_finish(request);
                    request = NULL;
                }
                device_delete_child(dev, children[i]);
            }
        }
        free(children, M_TEMP);
        mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
    }

    /* if we still have a good request put it on the queue again */
    if (request && !(request->flags & ATA_R_TIMEOUT)) {
        device_printf(request->dev,
            "WARNING - %s requeued due to channel reset",
            ata_cmd2str(request));
        if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
            printf(" LBA=%ju", request->u.ata.lba);
        printf("\n");
        request->flags |= ATA_R_REQUEUE;
        ata_queue_request(request);
    }

    /* we're done, release the channel for new work */
    mtx_lock(&ch->state_mtx);
    ch->state = ATA_IDLE;
    mtx_unlock(&ch->state_mtx);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    /* Add new children. */
    /* ata_identify(dev); */

    if (bootverbose)
        device_printf(dev, "reinit done ..\n");

    /* kick off requests on the queue */
    ata_start(dev);
#else
    xpt_freeze_simq(ch->sim, 1);
    if ((request = ch->running)) {
        ch->running = NULL;
        if (ch->state == ATA_ACTIVE)
            ch->state = ATA_IDLE;
        callout_stop(&request->callout);
        if (ch->dma.unload)
            ch->dma.unload(request);
        request->result = ERESTART;
        ata_cam_end_transaction(dev, request);
    }
    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);
    /* Tell the XPT about the event */
    xpt_async(AC_BUS_RESET, ch->path, NULL);
    xpt_release_simq(ch->sim, TRUE);
#endif
    return(0);
}

int
ata_suspend(device_t dev)
{
    struct ata_channel *ch;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
        return ENXIO;

#ifdef ATA_CAM
    mtx_lock(&ch->state_mtx);
    xpt_freeze_simq(ch->sim, 1);
    while (ch->state != ATA_IDLE)
        msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
    mtx_unlock(&ch->state_mtx);
#else
    /* wait for the channel to be IDLE or detached before suspending */
    while (ch->r_irq) {
        mtx_lock(&ch->state_mtx);
        if (ch->state == ATA_IDLE) {
            ch->state = ATA_ACTIVE;
            mtx_unlock(&ch->state_mtx);
            break;
        }
        mtx_unlock(&ch->state_mtx);
        tsleep(ch, PRIBIO, "atasusp", hz/10);
    }
    ATA_LOCKING(dev, ATA_LF_UNLOCK);
#endif
    return(0);
}

int
ata_resume(device_t dev)
{
    struct ata_channel *ch;
    int error;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
        return ENXIO;

#ifdef ATA_CAM
    mtx_lock(&ch->state_mtx);
    error = ata_reinit(dev);
    xpt_release_simq(ch->sim, TRUE);
    mtx_unlock(&ch->state_mtx);
#else
    /* reinit the devices, we don't know what mode/state they are in */
    error = ata_reinit(dev);
    /* kick off requests on the queue */
    ata_start(dev);
#endif
    return error;
}

void
ata_interrupt(void *data)
{
#ifdef ATA_CAM
    struct ata_channel *ch = (struct ata_channel *)data;

    mtx_lock(&ch->state_mtx);
#endif
    ata_interrupt_locked(data);
#ifdef ATA_CAM
    mtx_unlock(&ch->state_mtx);
#endif
}

static void
ata_interrupt_locked(void *data)
{
    struct ata_channel *ch = (struct ata_channel *)data;
    struct ata_request *request;

#ifndef ATA_CAM
    mtx_lock(&ch->state_mtx);
#endif
    do {
        /* ignore interrupt if it's not for us */
        if (ch->hw.status && !ch->hw.status(ch->dev))
            break;

        /* do we have a running request */
        if (!(request = ch->running))
            break;

        ATA_DEBUG_RQ(request, "interrupt");

        /* safety check for the right state */
        if (ch->state == ATA_IDLE) {
            device_printf(request->dev, "interrupt on idle channel ignored\n");
            break;
        }

        /*
         * we have the HW locks, so end the transaction for this request
         * if it finishes immediately otherwise wait for next interrupt
         */
        if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
            ch->running = NULL;
            if (ch->state == ATA_ACTIVE)
                ch->state = ATA_IDLE;
#ifdef ATA_CAM
            ata_cam_end_transaction(ch->dev, request);
#else
            mtx_unlock(&ch->state_mtx);
            ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
            ata_finish(request);
#endif
            return;
        }
    } while (0);
#ifndef ATA_CAM
    mtx_unlock(&ch->state_mtx);
#endif
}

void
ata_print_cable(device_t dev, u_int8_t *who)
{
    device_printf(dev,
        "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
}

int
ata_check_80pin(device_t dev, int mode)
{
    struct ata_device *atadev = device_get_softc(dev);

    if (!ata_dma_check_80pin) {
        if (bootverbose)
            device_printf(dev, "Skipping 80pin cable check\n");
        return mode;
    }

    if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
        ata_print_cable(dev, "device");
        mode = ATA_UDMA2;
    }
    return mode;
}

void
ata_setmode(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int error, mode, pmode;

    mode = atadev->mode;
    do {
        pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
        mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
        if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
            mode = ata_check_80pin(dev, mode);
    } while (pmode != mode); /* Iterate until successful negotiation. */
    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
    if (bootverbose)
        device_printf(dev, "%ssetting %s\n",
            (error) ? "FAILURE " : "", ata_mode2str(mode));
    atadev->mode = mode;
}

/*
 * device related interfaces
 */
#ifndef ATA_CAM
static int
ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
    int32_t flag, struct thread *td)
{
    device_t device, *children;
    struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
    int *value = (int *)data;
    int i, nchildren, error = ENOTTY;

    switch (cmd) {
    case IOCATAGMAXCHANNEL:
        /* In case we have channel 0..n this will return n+1. */
        *value = devclass_get_maxunit(ata_devclass);
        error = 0;
        break;

    case IOCATAREINIT:
        if (*value >= devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)) ||
            !device_is_attached(device))
            return ENXIO;
        error = ata_reinit(device);
        break;

    case IOCATAATTACH:
        if (*value >= devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)) ||
            !device_is_attached(device))
            return ENXIO;
        error = DEVICE_ATTACH(device);
        break;

    case IOCATADETACH:
        if (*value >= devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)) ||
            !device_is_attached(device))
            return ENXIO;
        error = DEVICE_DETACH(device);
        break;

    case IOCATADEVICES:
        if (devices->channel >= devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, devices->channel)) ||
            !device_is_attached(device))
            return ENXIO;
        bzero(devices->name[0], 32);
        bzero(&devices->params[0], sizeof(struct ata_params));
        bzero(devices->name[1], 32);
        bzero(&devices->params[1], sizeof(struct ata_params));
        if (!device_get_children(device, &children, &nchildren)) {
            for (i = 0; i < nchildren; i++) {
                if (children[i] && device_is_attached(children[i])) {
                    struct ata_device *atadev = device_get_softc(children[i]);

                    if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */
                        strncpy(devices->name[0],
                            device_get_nameunit(children[i]), 32);
                        bcopy(&atadev->param, &devices->params[0],
                            sizeof(struct ata_params));
                    }
                    if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */
                        strncpy(devices->name[1],
                            device_get_nameunit(children[i]), 32);
                        bcopy(&atadev->param, &devices->params[1],
                            sizeof(struct ata_params));
                    }
                }
            }
            free(children, M_TEMP);
            error = 0;
        }
        else
            error = ENODEV;
        break;

    default:
        if (ata_raid_ioctl_func)
            error = ata_raid_ioctl_func(cmd, data);
    }
    return error;
}
#endif

int
ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
{
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
    struct ata_params *params = (struct ata_params *)data;
    int *mode = (int *)data;
    struct ata_request *request;
    caddr_t buf;
    int error;

    switch (cmd) {
    case IOCATAREQUEST:
        if (ioc_request->count >
            (ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS)) {
            return (EFBIG);
        }
        if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
            return ENOMEM;
        }
        if (!(request = ata_alloc_request())) {
            free(buf, M_ATA);
            return ENOMEM;
        }
        request->dev = atadev->dev;
        if (ioc_request->flags & ATA_CMD_WRITE) {
            error = copyin(ioc_request->data, buf, ioc_request->count);
            if (error) {
                free(buf, M_ATA);
                ata_free_request(request);
                return error;
            }
        }
        if (ioc_request->flags & ATA_CMD_ATAPI) {
            request->flags = ATA_R_ATAPI;
            bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
        }
        else {
            request->u.ata.command = ioc_request->u.ata.command;
            request->u.ata.feature = ioc_request->u.ata.feature;
            request->u.ata.lba = ioc_request->u.ata.lba;
            request->u.ata.count = ioc_request->u.ata.count;
        }
        request->timeout = ioc_request->timeout;
        request->data = buf;
        request->bytecount = ioc_request->count;
        request->transfersize = request->bytecount;
        if (ioc_request->flags & ATA_CMD_CONTROL)
            request->flags |= ATA_R_CONTROL;
        if (ioc_request->flags & ATA_CMD_READ)
            request->flags |= ATA_R_READ;
        if (ioc_request->flags & ATA_CMD_WRITE)
            request->flags |= ATA_R_WRITE;
        ata_queue_request(request);
        if (request->flags & ATA_R_ATAPI) {
            bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
                sizeof(struct atapi_sense));
        }
        else {
            ioc_request->u.ata.command = request->u.ata.command;
            ioc_request->u.ata.feature = request->u.ata.feature;
            ioc_request->u.ata.lba = request->u.ata.lba;
            ioc_request->u.ata.count = request->u.ata.count;
        }
        ioc_request->error = request->result;
        if (ioc_request->flags & ATA_CMD_READ)
            error = copyout(buf, ioc_request->data, ioc_request->count);
        else
            error = 0;
        free(buf, M_ATA);
        ata_free_request(request);
        return error;

    case IOCATAGPARM:
        ata_getparam(atadev, 0);
        bcopy(&atadev->param, params, sizeof(struct ata_params));
        return 0;

    case IOCATASMODE:
        atadev->mode = *mode;
        ata_setmode(dev);
        return 0;

    case IOCATAGMODE:
        *mode = atadev->mode |
            (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
        return 0;
    case IOCATASSPINDOWN:
        atadev->spindown = *mode;
        return 0;
    case IOCATAGSPINDOWN:
        *mode = atadev->spindown;
        return 0;
    default:
        return ENOTTY;
    }
}

#ifndef ATA_CAM
static void
ata_boot_attach(void)
{
    struct ata_channel *ch;
    int ctlr;

    mtx_lock(&Giant); /* newbus suckage it needs Giant */

    /* kick off probe and attach on all channels */
    for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
        if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
            ata_identify(ch->dev);
        }
    }

    /* release the hook that got us here, we are only needed once during boot */
    if (ata_delayed_attach) {
        config_intrhook_disestablish(ata_delayed_attach);
        free(ata_delayed_attach, M_TEMP);
        ata_delayed_attach = NULL;
    }

    mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */
}
#endif

/*
 * misc support functions
 */
#ifndef ATA_CAM
static device_t
ata_add_child(device_t parent, struct ata_device *atadev, int unit)
{
    device_t child;

    if ((child = device_add_child(parent, NULL, unit))) {
        device_set_softc(child, atadev);
        device_quiet(child);
        atadev->dev = child;
        atadev->max_iosize = DEV_BSIZE;
        atadev->mode = ATA_PIO_MAX;
    }
    return child;
}
#endif

int
ata_getparam(struct ata_device *atadev, int init)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
    struct ata_request *request;
    u_int8_t command = 0;
    int error = ENOMEM, retries = 2;

    if (ch->devices & (ATA_ATA_MASTER << atadev->unit))
        command = ATA_ATA_IDENTIFY;
    if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit))
        command = ATA_ATAPI_IDENTIFY;
    if (!command)
        return ENXIO;

    while (retries-- > 0 && error) {
        if (!(request = ata_alloc_request()))
            break;
        request->dev = atadev->dev;
        request->timeout = 1;
        request->retries = 0;
        request->u.ata.command = command;
        request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT);
        if (!bootverbose)
            request->flags |= ATA_R_QUIET;
        request->data = (void *)&atadev->param;
        request->bytecount = sizeof(struct ata_params);
        request->donecount = 0;
        request->transfersize = DEV_BSIZE;
        ata_queue_request(request);
        error = request->result;
        ata_free_request(request);
    }

    if (!error && (isprint(atadev->param.model[0]) ||
        isprint(atadev->param.model[1]))) {
        struct ata_params *atacap = &atadev->param;
        int16_t *ptr;

        for (ptr = (int16_t *)atacap;
            ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
            *ptr = le16toh(*ptr);
        }
        if (!(!strncmp(atacap->model, "FX", 2) ||
            !strncmp(atacap->model, "NEC", 3) ||
            !strncmp(atacap->model, "Pioneer", 7) ||
            !strncmp(atacap->model, "SHARP", 5))) {
            bswap(atacap->model, sizeof(atacap->model));
            bswap(atacap->revision, sizeof(atacap->revision));
            bswap(atacap->serial, sizeof(atacap->serial));
        }
        btrim(atacap->model, sizeof(atacap->model));
        bpack(atacap->model, atacap->model, sizeof(atacap->model));
        btrim(atacap->revision, sizeof(atacap->revision));
        bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
        btrim(atacap->serial, sizeof(atacap->serial));
        bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));

        if (bootverbose)
            printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
                device_get_unit(ch->dev),
                ata_unit2str(atadev),
                ata_mode2str(ata_pmode(atacap)),
                ata_mode2str(ata_wmode(atacap)),
                ata_mode2str(ata_umode(atacap)),
                (atacap->hwres & ATA_CABLE_ID) ? "80":"40");

        if (init) {
            char buffer[64];

            sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
            device_set_desc_copy(atadev->dev, buffer);
            if ((atadev->param.config & ATA_PROTO_ATAPI) &&
                (atadev->param.config != ATA_CFA_MAGIC1) &&
                (atadev->param.config != ATA_CFA_MAGIC2)) {
                if (atapi_dma &&
                    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
                    ata_umode(&atadev->param) >= ATA_UDMA2)
                    atadev->mode = ATA_DMA_MAX;
            }
            else {
                if (ata_dma &&
                    (ata_umode(&atadev->param) > 0 ||
                     ata_wmode(&atadev->param) > 0))
                    atadev->mode = ATA_DMA_MAX;
            }
        }
    }
    else {
        if (!error)
            error = ENXIO;
    }
    return error;
}

#ifndef ATA_CAM
int
ata_identify(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_device *atadev;
    device_t *children;
    device_t child, master = NULL;
    int nchildren, i, n = ch->devices;

    if (bootverbose)
        device_printf(dev, "Identifying devices: %08x\n", ch->devices);

    mtx_lock(&Giant);
    /* Skip existing devices. */
    if (!device_get_children(dev, &children, &nchildren)) {
        for (i = 0; i < nchildren; i++) {
            if (children[i] && (atadev = device_get_softc(children[i])))
                n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit);
        }
        free(children, M_TEMP);
    }
    /* Create new devices. */
    if (bootverbose)
        device_printf(dev, "New devices: %08x\n", n);
    if (n == 0) {
        mtx_unlock(&Giant);
        return (0);
    }
    for (i = 0; i < ATA_PM; ++i) {
        if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) {
            int unit = -1;

            if (!(atadev = malloc(sizeof(struct ata_device),
                M_ATA, M_NOWAIT | M_ZERO))) {
                device_printf(dev, "out of memory\n");
                return ENOMEM;
            }
            atadev->unit = i;
#ifdef ATA_STATIC_ID
            if (n & (ATA_ATA_MASTER << i))
                unit = (device_get_unit(dev) << 1) + i;
#endif
            if ((child = ata_add_child(dev, atadev, unit))) {
                /*
                 * PATA slave should be identified first, to allow
                 * device cable detection on master to work properly.
                 */
                if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
                    (n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
                    master = child;
                    continue;
                }
                if (ata_getparam(atadev, 1)) {
                    device_delete_child(dev, child);
                    free(atadev, M_ATA);
                }
            }
            else
                free(atadev, M_ATA);
        }
    }
    if (master) {
        atadev = device_get_softc(master);
        if (ata_getparam(atadev, 1)) {
            device_delete_child(dev, master);
            free(atadev, M_ATA);
        }
    }
    bus_generic_probe(dev);
    bus_generic_attach(dev);
    mtx_unlock(&Giant);
    return 0;
}
#endif

void
ata_default_registers(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* fill in the defaults from what's set up already */
    ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
    ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
    ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
    ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
    ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
    ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
    ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
    ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}

void
ata_modify_if_48bit(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_device *atadev = device_get_softc(request->dev);

    request->flags &= ~ATA_R_48BIT;

    if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
        request->u.ata.count > 256) &&
        atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

        /* translate command into 48bit version */
        switch (request->u.ata.command) {
        case ATA_READ:
            request->u.ata.command = ATA_READ48;
            break;
        case ATA_READ_MUL:
            request->u.ata.command = ATA_READ_MUL48;
            break;
        case ATA_READ_DMA:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_READ_MUL48;
                else
                    request->u.ata.command = ATA_READ48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_READ_DMA48;
            break;
        case ATA_READ_DMA_QUEUED:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_READ_MUL48;
                else
                    request->u.ata.command = ATA_READ48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_READ_DMA_QUEUED48;
            break;
        case ATA_WRITE:
            request->u.ata.command = ATA_WRITE48;
            break;
        case ATA_WRITE_MUL:
            request->u.ata.command = ATA_WRITE_MUL48;
            break;
        case ATA_WRITE_DMA:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_WRITE_MUL48;
                else
                    request->u.ata.command = ATA_WRITE48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_WRITE_DMA48;
            break;
        case ATA_WRITE_DMA_QUEUED:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_WRITE_MUL48;
                else
                    request->u.ata.command = ATA_WRITE48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
            break;
        case ATA_FLUSHCACHE:
            request->u.ata.command = ATA_FLUSHCACHE48;
            break;
        case ATA_SET_MAX_ADDRESS:
            request->u.ata.command = ATA_SET_MAX_ADDRESS48;
            break;
        default:
            return;
        }
        request->flags |= ATA_R_48BIT;
    }
    else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

        /* translate command into 48bit version */
        switch (request->u.ata.command) {
        case ATA_FLUSHCACHE:
            request->u.ata.command = ATA_FLUSHCACHE48;
            break;
        case ATA_READ_NATIVE_MAX_ADDRESS:
            request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
            break;
        case ATA_SET_MAX_ADDRESS:
            request->u.ata.command = ATA_SET_MAX_ADDRESS48;
            break;
        default:
            return;
        }
        request->flags |= ATA_R_48BIT;
    }
}

void
ata_udelay(int interval)
{
    /* for now just use DELAY, the timer/sleep subsystems are not there yet */
    if (1 || interval < (1000000/hz) || ata_delayed_attach)
        DELAY(interval);
    else
        pause("ataslp", interval/(1000000/hz));
}

char *
ata_unit2str(struct ata_device *atadev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
    static char str[8];

    if (ch->devices & ATA_PORTMULTIPLIER)
        sprintf(str, "port%d", atadev->unit);
    else
        sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
    return str;
}

const char *
ata_mode2str(int mode)
{
    switch (mode) {
    case -1: return "UNSUPPORTED";
    case ATA_PIO0: return "PIO0";
    case ATA_PIO1: return "PIO1";
    case ATA_PIO2: return "PIO2";
    case ATA_PIO3: return "PIO3";
    case ATA_PIO4: return "PIO4";
    case ATA_WDMA0: return "WDMA0";
    case ATA_WDMA1: return "WDMA1";
    case ATA_WDMA2: return "WDMA2";
    case ATA_UDMA0: return "UDMA16";
    case ATA_UDMA1: return "UDMA25";
    case ATA_UDMA2: return "UDMA33";
    case ATA_UDMA3: return "UDMA40";
    case ATA_UDMA4: return "UDMA66";
    case ATA_UDMA5: return "UDMA100";
    case ATA_UDMA6: return "UDMA133";
    case ATA_SA150: return "SATA150";
    case ATA_SA300: return "SATA300";
    default:
        if (mode & ATA_DMA_MASK)
            return "BIOSDMA";
        else
            return "BIOSPIO";
    }
}

const char *
ata_satarev2str(int rev)
{
    switch (rev) {
    case 0: return "";
    case 1: return "SATA 1.5Gb/s";
    case 2: return "SATA 3Gb/s";
    case 3: return "SATA 6Gb/s";
    case 0xff: return "SATA";
    default: return "???";
    }
}

int
ata_atapi(device_t dev, int target)
{
    struct ata_channel *ch = device_get_softc(dev);

    return (ch->devices & (ATA_ATAPI_MASTER << target));
}

int
ata_pmode(struct ata_params *ap)
{
    if (ap->atavalid & ATA_FLAG_64_70) {
        if (ap->apiomodes & 0x02)
            return ATA_PIO4;
        if (ap->apiomodes & 0x01)
            return ATA_PIO3;
    }
    if (ap->mwdmamodes & 0x04)
        return ATA_PIO4;
    if (ap->mwdmamodes & 0x02)
        return ATA_PIO3;
    if (ap->mwdmamodes & 0x01)
        return ATA_PIO2;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
        return ATA_PIO2;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
        return ATA_PIO1;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
        return ATA_PIO0;
    return ATA_PIO0;
}

int
ata_wmode(struct ata_params *ap)
{
    if (ap->mwdmamodes & 0x04)
        return ATA_WDMA2;
    if (ap->mwdmamodes & 0x02)
        return ATA_WDMA1;
    if (ap->mwdmamodes & 0x01)
        return ATA_WDMA0;
    return -1;
}

int
ata_umode(struct ata_params *ap)
{
    if (ap->atavalid & ATA_FLAG_88) {
        if (ap->udmamodes & 0x40)
            return ATA_UDMA6;
        if (ap->udmamodes & 0x20)
            return ATA_UDMA5;
        if (ap->udmamodes & 0x10)
            return ATA_UDMA4;
        if (ap->udmamodes & 0x08)
            return ATA_UDMA3;
        if (ap->udmamodes & 0x04)
            return ATA_UDMA2;
        if (ap->udmamodes & 0x02)
            return ATA_UDMA1;
        if (ap->udmamodes & 0x01)
            return ATA_UDMA0;
    }
    return -1;
}

int
ata_limit_mode(device_t dev, int mode, int maxmode)
{
    struct ata_device *atadev = device_get_softc(dev);

    if (maxmode && mode > maxmode)
        mode = maxmode;

    if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
        return min(mode, ata_umode(&atadev->param));

    if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
        return min(mode, ata_wmode(&atadev->param));

    if (mode > ata_pmode(&atadev->param))
        return min(mode, ata_pmode(&atadev->param));

    return mode;
}

static void
bswap(int8_t *buf, int len)
{
    u_int16_t *ptr = (u_int16_t*)(buf + len);

    while (--ptr >= (u_int16_t*)buf)
        *ptr = ntohs(*ptr);
}

static void
btrim(int8_t *buf, int len)
{
    int8_t *ptr;

    for (ptr = buf; ptr < buf+len; ++ptr)
        if (!*ptr || *ptr == '_')
            *ptr = ' ';
    for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
        *ptr = 0;
}

static void
bpack(int8_t *src, int8_t *dst, int len)
{
    int i, j, blank;

    for (i = j = blank = 0 ; i < len; i++) {
        if (blank && src[i] == ' ') continue;
        if (blank && src[i] != ' ') {
            dst[j++] = src[i];
            blank = 0;
            continue;
        }
        if (src[i] == ' ') {
            blank = 1;
            if (i == 0)
                continue;
        }
        dst[j++] = src[i];
    }
    if (j < len)
        dst[j] = 0x00;
}

#ifdef ATA_CAM
void
ata_cam_begin_transaction(device_t dev, union ccb *ccb)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;

    if (!(request = ata_alloc_request())) {
        device_printf(dev, "FAILURE - out of memory in start\n");
        ccb->ccb_h.status = CAM_REQ_INVALID;
        xpt_done(ccb);
        return;
    }
    bzero(request, sizeof(*request));

    /* setup request */
    request->dev = NULL;
    request->parent = dev;
    request->unit = ccb->ccb_h.target_id;
    if (ccb->ccb_h.func_code == XPT_ATA_IO) {
        request->data = ccb->ataio.data_ptr;
        request->bytecount = ccb->ataio.dxfer_len;
        request->u.ata.command = ccb->ataio.cmd.command;
        request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
            (uint16_t)ccb->ataio.cmd.features;
        request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
            (uint16_t)ccb->ataio.cmd.sector_count;
        if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
            request->flags |= ATA_R_48BIT;
            request->u.ata.lba =
                ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
                ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
                ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
        } else {
            request->u.ata.lba =
                ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
        }
        request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
            ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
            (uint64_t)ccb->ataio.cmd.lba_low;
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
            ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
            request->flags |= ATA_R_DMA;
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
            request->flags |= ATA_R_READ;
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
            request->flags |= ATA_R_WRITE;
    } else {
        request->data = ccb->csio.data_ptr;
        request->bytecount = ccb->csio.dxfer_len;
        bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
            ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
            request->u.atapi.ccb, ccb->csio.cdb_len);
        request->flags |= ATA_R_ATAPI;
        if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
            request->flags |= ATA_R_ATAPI16;
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
            ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
            request->flags |= ATA_R_DMA;
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
            request->flags |= ATA_R_READ;
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
            request->flags |= ATA_R_WRITE;
    }
    request->transfersize = min(request->bytecount,
        ch->curr[ccb->ccb_h.target_id].bytecount);
    request->retries = 0;
    request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
    callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
    request->ccb = ccb;

    ch->running = request;
    ch->state = ATA_ACTIVE;
    if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
        ch->running = NULL;
        ch->state = ATA_IDLE;
        ata_cam_end_transaction(dev, request);
        return;
    }
}

void
ata_cam_end_transaction(device_t dev, struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(dev);
    union ccb *ccb = request->ccb;
    int fatalerr = 0;

    ccb->ccb_h.status &= ~CAM_STATUS_MASK;
    if (request->flags & ATA_R_TIMEOUT) {
        xpt_freeze_simq(ch->sim, 1);
        ccb->ccb_h.status &= ~CAM_STATUS_MASK;
        ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
        fatalerr = 1;
    } else if (request->status & ATA_S_ERROR) {
        if (ccb->ccb_h.func_code == XPT_ATA_IO) {
            ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
        } else {
            ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
            ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
        }
    } else if (request->result == ERESTART)
        ccb->ccb_h.status |= CAM_REQUEUE_REQ;
    else if (request->result != 0)
        ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
    else
        ccb->ccb_h.status |= CAM_REQ_CMP;
    if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
        !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
        xpt_freeze_devq(ccb->ccb_h.path, 1);
        ccb->ccb_h.status |= CAM_DEV_QFRZN;
    }
    if (ccb->ccb_h.func_code == XPT_ATA_IO &&
        ((request->status & ATA_S_ERROR) ||
        (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
        struct ata_res *res = &ccb->ataio.res;
        res->status = request->status;
        res->error = request->error;
        res->lba_low = request->u.ata.lba;
        res->lba_mid = request->u.ata.lba >> 8;
        res->lba_high = request->u.ata.lba >> 16;
        res->device = request->u.ata.lba >> 24;
        res->lba_low_exp = request->u.ata.lba >> 24;
        res->lba_mid_exp = request->u.ata.lba >> 32;
        res->lba_high_exp = request->u.ata.lba >> 40;
        res->sector_count = request->u.ata.count;
        res->sector_count_exp = request->u.ata.count >> 8;
    }
    ata_free_request(request);
    xpt_done(ccb);
    /* Do error recovery if needed. */
    if (fatalerr)
        ata_reinit(dev);
}

static int
ata_check_ids(device_t dev, union ccb *ccb)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
        ccb->ccb_h.status = CAM_TID_INVALID;
        xpt_done(ccb);
        return (-1);
    }
    if (ccb->ccb_h.target_lun != 0) {
        ccb->ccb_h.status = CAM_LUN_INVALID;
        xpt_done(ccb);
        return (-1);
    }
    return (0);
}

static void
ataaction(struct cam_sim *sim, union ccb *ccb)
{
    device_t dev;
    struct ata_channel *ch;

    CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
        ccb->ccb_h.func_code));

    ch = (struct ata_channel *)cam_sim_softc(sim);
    dev = ch->dev;
    switch (ccb->ccb_h.func_code) {
    /* Common cases first */
    case XPT_ATA_IO:            /* Execute the requested I/O operation */
    case XPT_SCSI_IO:
        if (ata_check_ids(dev, ccb))
            return;
        if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
            << ccb->ccb_h.target_id)) == 0) {
            ccb->ccb_h.status = CAM_SEL_TIMEOUT;
            break;
        }
        if (ch->running)
            device_printf(dev, "already running!\n");
        if (ccb->ccb_h.func_code == XPT_ATA_IO &&
            (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
            (ccb->ataio.cmd.control & ATA_A_RESET)) {
            struct ata_res *res = &ccb->ataio.res;

            bzero(res, sizeof(*res));
            if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
                res->lba_high = 0;
                res->lba_mid = 0;
            } else {
                res->lba_high = 0xeb;
                res->lba_mid = 0x14;
            }
            ccb->ccb_h.status = CAM_REQ_CMP;
            break;
        }
        ata_cam_begin_transaction(dev, ccb);
        return;
    case XPT_EN_LUN:            /* Enable LUN as a target */
    case XPT_TARGET_IO:         /* Execute target I/O request */
    case XPT_ACCEPT_TARGET_IO:  /* Accept Host Target Mode CDB */
    case XPT_CONT_TARGET_IO:    /* Continue Host Target I/O Connection*/
    case XPT_ABORT:             /* Abort the specified CCB */
        /* XXX Implement */
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    case XPT_SET_TRAN_SETTINGS:
    {
        struct ccb_trans_settings *cts = &ccb->cts;
        struct ata_cam_device *d;

        if (ata_check_ids(dev, ccb))
            return;
        if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
            d = &ch->curr[ccb->ccb_h.target_id];
        else
            d = &ch->user[ccb->ccb_h.target_id];
        if (ch->flags & ATA_SATA) {
            if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
                d->revision = cts->xport_specific.sata.revision;
            if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
                if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
                    d->mode = ATA_SETMODE(ch->dev,
                        ccb->ccb_h.target_id,
                        cts->xport_specific.sata.mode);
                } else
                    d->mode = cts->xport_specific.sata.mode;
            }
            if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
                d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
            if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
                d->atapi = cts->xport_specific.sata.atapi;
        } else {
            if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
                if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
                    d->mode = ATA_SETMODE(ch->dev,
                        ccb->ccb_h.target_id,
                        cts->xport_specific.ata.mode);
                } else
                    d->mode = cts->xport_specific.ata.mode;
            }
            if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
                d->bytecount = cts->xport_specific.ata.bytecount;
            if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
                d->atapi = cts->xport_specific.ata.atapi;
        }
        ccb->ccb_h.status = CAM_REQ_CMP;
        break;
    }
    case XPT_GET_TRAN_SETTINGS:
    {
        struct ccb_trans_settings *cts = &ccb->cts;
        struct ata_cam_device *d;

        if (ata_check_ids(dev, ccb))
            return;
        if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
            d = &ch->curr[ccb->ccb_h.target_id];
        else
            d = &ch->user[ccb->ccb_h.target_id];
        cts->protocol = PROTO_ATA;
        cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
        if (ch->flags & ATA_SATA) {
            cts->transport = XPORT_SATA;
            cts->transport_version = XPORT_VERSION_UNSPECIFIED;
            cts->xport_specific.sata.valid = 0;
            cts->xport_specific.sata.mode = d->mode;
            cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
            cts->xport_specific.sata.bytecount = d->bytecount;
            cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
            if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
                cts->xport_specific.sata.revision =
                    ATA_GETREV(dev, ccb->ccb_h.target_id);
                if (cts->xport_specific.sata.revision != 0xff) {
                    cts->xport_specific.sata.valid |=
                        CTS_SATA_VALID_REVISION;
                }
            } else {
                cts->xport_specific.sata.revision = d->revision;
                cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
            }
            cts->xport_specific.sata.atapi = d->atapi;
            cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
        } else {
            cts->transport = XPORT_ATA;
            cts->transport_version = XPORT_VERSION_UNSPECIFIED;
            cts->xport_specific.ata.valid = 0;
            cts->xport_specific.ata.mode = d->mode;
            cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
            cts->xport_specific.ata.bytecount = d->bytecount;
            cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
            cts->xport_specific.ata.atapi = d->atapi;
            cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
        }
        ccb->ccb_h.status = CAM_REQ_CMP;
        break;
    }
    case XPT_RESET_BUS:     /* Reset the specified SCSI bus */
    case XPT_RESET_DEV:     /* Bus Device Reset the specified SCSI device */
        ata_reinit(dev);
        ccb->ccb_h.status = CAM_REQ_CMP;
        break;
    case XPT_TERM_IO:       /* Terminate the I/O process */
        /* XXX Implement */
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    case XPT_PATH_INQ:      /* Path routing inquiry */
    {
        struct ccb_pathinq *cpi = &ccb->cpi;

        cpi->version_num = 1; /* XXX??? */
        cpi->hba_inquiry = PI_SDTR_ABLE;
        cpi->target_sprt = 0;
        cpi->hba_misc = PIM_SEQSCAN;
        cpi->hba_eng_cnt = 0;
        if (ch->flags & ATA_NO_SLAVE)
            cpi->max_target = 0;
        else
            cpi->max_target = 1;
        cpi->max_lun = 0;
        cpi->initiator_id = 0;
        cpi->bus_id = cam_sim_bus(sim);
        if (ch->flags & ATA_SATA)
            cpi->base_transfer_speed = 150000;
        else
            cpi->base_transfer_speed = 3300;
        strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
        strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
        strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
        cpi->unit_number = cam_sim_unit(sim);
        if (ch->flags & ATA_SATA)
            cpi->transport = XPORT_SATA;
        else
            cpi->transport = XPORT_ATA;
        cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
        cpi->protocol = PROTO_ATA;
        cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
        cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
        cpi->ccb_h.status = CAM_REQ_CMP;
        break;
    }
    default:
        ccb->ccb_h.status = CAM_REQ_INVALID;
        break;
    }
    xpt_done(ccb);
}

static void
atapoll(struct cam_sim *sim)
{
    struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);

    ata_interrupt_locked(ch);
}
#endif

/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{
#ifndef ATA_CAM
    static struct cdev *atacdev;
#endif

    switch (what) {
    case MOD_LOAD:
#ifndef ATA_CAM
        /* register controlling device */
        atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");

        if (cold) {
            /* register boot attach to be run when interrupts are enabled */
            if (!(ata_delayed_attach = (struct intr_config_hook *)
                malloc(sizeof(struct intr_config_hook),
                    M_TEMP, M_NOWAIT | M_ZERO))) {
                printf("ata: malloc of delayed attach hook failed\n");
                return EIO;
            }
            ata_delayed_attach->ich_func = (void*)ata_boot_attach;
            if (config_intrhook_establish(ata_delayed_attach) != 0) {
                printf("ata: config_intrhook_establish failed\n");
                free(ata_delayed_attach, M_TEMP);
            }
        }
#endif
        return 0;

    case MOD_UNLOAD:
#ifndef ATA_CAM
        /* deregister controlling device */
        destroy_dev(atacdev);
#endif
        return 0;

    default:
        return EOPNOTSUPP;
    }
}

static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(ata, 1);
#ifdef ATA_CAM
MODULE_DEPEND(ata, cam, 1, 1, 1);
#endif

static void
ata_init(void)
{
    ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
        NULL, NULL, NULL, NULL, 0, 0);
    ata_composite_zone = uma_zcreate("ata_composite",
        sizeof(struct ata_composite),
        NULL, NULL, NULL, NULL, 0, 0);
}
SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);

static void
ata_uninit(void)
{
    uma_zdestroy(ata_composite_zone);
    uma_zdestroy(ata_request_zone);
}
SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);