1 /*- 2 * Copyright (c) 1998 - 2007 S�ren Schmidt <sos@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_ata.h" 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/ata.h> 34 #include <sys/kernel.h> 35 #include <sys/module.h> 36 #include <sys/endian.h> 37 #include <sys/ctype.h> 38 #include <sys/conf.h> 39 #include <sys/bus.h> 40 #include <sys/bio.h> 41 #include <sys/malloc.h> 42 #include <sys/sysctl.h> 43 #include <sys/sema.h> 44 #include <sys/taskqueue.h> 45 #include <vm/uma.h> 46 #include <machine/stdarg.h> 47 #include <machine/resource.h> 48 #include <machine/bus.h> 49 #include <sys/rman.h> 50 #include <dev/ata/ata-all.h> 51 #include <ata_if.h> 52 53 /* device structure */ 54 static d_ioctl_t ata_ioctl; 55 static struct cdevsw ata_cdevsw = { 56 .d_version = D_VERSION, 57 .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */ 58 .d_ioctl = ata_ioctl, 59 .d_name = "ata", 60 }; 61 62 /* prototypes */ 63 static void ata_boot_attach(void); 64 static device_t ata_add_child(device_t, struct ata_device *, int); 65 static int ata_getparam(struct ata_device *, int); 66 static void bswap(int8_t *, int); 67 static void btrim(int8_t *, int); 68 static void bpack(int8_t *, int8_t *, int); 69 70 /* global vars */ 71 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer"); 72 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL; 73 struct intr_config_hook *ata_delayed_attach = NULL; 74 devclass_t ata_devclass; 75 uma_zone_t ata_request_zone; 76 uma_zone_t ata_composite_zone; 77 int ata_wc = 1; 78 79 /* local vars */ 80 static int ata_dma = 1; 81 static int atapi_dma = 1; 82 83 /* sysctl vars */ 84 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters"); 85 TUNABLE_INT("hw.ata.ata_dma", &ata_dma); 86 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0, 87 "ATA disk DMA mode control"); 88 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma); 89 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0, 90 "ATAPI device DMA mode control"); 91 TUNABLE_INT("hw.ata.wc", &ata_wc); 92 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0, 93 "ATA disk write caching"); 94 95 /* 96 * newbus device interface related functions 97 */ 98 int 99 ata_probe(device_t dev) 100 { 101 return 0; 102 } 103 104 int 105 ata_attach(device_t dev) 106 { 107 struct ata_channel *ch = device_get_softc(dev); 108 int error, rid; 109 110 /* check that we have a virgin channel to attach */ 111 if (ch->r_irq) 112 return EEXIST; 113 114 /* initialize the softc basics */ 115 ch->dev = dev; 116 ch->state = ATA_IDLE; 117 bzero(&ch->state_mtx, sizeof(struct mtx)); 118 mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF); 119 bzero(&ch->queue_mtx, sizeof(struct mtx)); 120 mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF); 121 TAILQ_INIT(&ch->ata_queue); 122 123 /* reset the controller HW, the channel and device(s) */ 124 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit) 125 pause("ataatch", 1); 126 ATA_RESET(dev); 127 ATA_LOCKING(dev, ATA_LF_UNLOCK); 128 129 /* setup interrupt delivery */ 130 rid = ATA_IRQ_RID; 131 ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 132 RF_SHAREABLE | RF_ACTIVE); 133 if (!ch->r_irq) { 134 device_printf(dev, "unable to allocate interrupt\n"); 135 return ENXIO; 136 } 137 if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, 138 (driver_intr_t *)ata_interrupt, ch, &ch->ih))) { 139 device_printf(dev, "unable to setup interrupt\n"); 140 return error; 141 } 142 143 /* probe and attach devices on this channel unless we are in early boot */ 144 if (!ata_delayed_attach) 145 ata_identify(dev); 146 return 0; 147 } 148 149 int 150 ata_detach(device_t dev) 151 { 152 struct ata_channel *ch = device_get_softc(dev); 153 device_t *children; 154 int nchildren, i; 155 156 /* check that we have a valid channel to detach */ 157 if (!ch->r_irq) 158 return ENXIO; 159 160 /* grap the channel lock so no new requests gets launched */ 161 mtx_lock(&ch->state_mtx); 162 ch->state |= ATA_STALL_QUEUE; 163 mtx_unlock(&ch->state_mtx); 164 165 /* detach & delete all children */ 166 if (!device_get_children(dev, &children, &nchildren)) { 167 for (i = 0; i < nchildren; i++) 168 if (children[i]) 169 device_delete_child(dev, children[i]); 170 free(children, M_TEMP); 171 } 172 173 /* release resources */ 174 bus_teardown_intr(dev, ch->r_irq, ch->ih); 175 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 176 ch->r_irq = NULL; 177 mtx_destroy(&ch->state_mtx); 178 mtx_destroy(&ch->queue_mtx); 179 return 0; 180 } 181 182 int 183 ata_reinit(device_t dev) 184 { 185 struct ata_channel *ch = device_get_softc(dev); 186 struct ata_request *request; 187 device_t *children; 188 int nchildren, i; 189 190 /* check that we have a valid channel to reinit */ 191 if (!ch || !ch->r_irq) 192 return ENXIO; 193 194 if (bootverbose) 195 device_printf(dev, "reiniting channel ..\n"); 196 197 /* poll for locking the channel */ 198 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit) 199 pause("atarini", 1); 200 201 /* catch eventual request in ch->running */ 202 mtx_lock(&ch->state_mtx); 203 if ((request = ch->running)) 204 callout_stop(&request->callout); 205 ch->running = NULL; 206 207 /* unconditionally grap the channel lock */ 208 ch->state |= ATA_STALL_QUEUE; 209 mtx_unlock(&ch->state_mtx); 210 211 /* reset the controller HW, the channel and device(s) */ 212 ATA_RESET(dev); 213 214 /* reinit the children and delete any that fails */ 215 if (!device_get_children(dev, &children, &nchildren)) { 216 mtx_lock(&Giant); /* newbus suckage it needs Giant */ 217 for (i = 0; i < nchildren; i++) { 218 /* did any children go missing ? */ 219 if (children[i] && device_is_attached(children[i]) && 220 ATA_REINIT(children[i])) { 221 /* 222 * if we had a running request and its device matches 223 * this child we need to inform the request that the 224 * device is gone. 225 */ 226 if (request && request->dev == children[i]) { 227 request->result = ENXIO; 228 device_printf(request->dev, "FAILURE - device detached\n"); 229 230 /* if not timeout finish request here */ 231 if (!(request->flags & ATA_R_TIMEOUT)) 232 ata_finish(request); 233 request = NULL; 234 } 235 device_delete_child(dev, children[i]); 236 } 237 } 238 free(children, M_TEMP); 239 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */ 240 } 241 242 /* if we still have a good request put it on the queue again */ 243 if (request && !(request->flags & ATA_R_TIMEOUT)) { 244 device_printf(request->dev, 245 "WARNING - %s requeued due to channel reset", 246 ata_cmd2str(request)); 247 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) 248 printf(" LBA=%ju", request->u.ata.lba); 249 printf("\n"); 250 request->flags |= ATA_R_REQUEUE; 251 ata_queue_request(request); 252 } 253 254 /* we're done release the channel for new work */ 255 mtx_lock(&ch->state_mtx); 256 ch->state = ATA_IDLE; 257 mtx_unlock(&ch->state_mtx); 258 ATA_LOCKING(dev, ATA_LF_UNLOCK); 259 260 if (bootverbose) 261 device_printf(dev, "reinit done ..\n"); 262 263 /* kick off requests on the queue */ 264 ata_start(dev); 265 return 0; 266 } 267 268 int 269 ata_suspend(device_t dev) 270 { 271 struct ata_channel *ch; 272 273 /* check for valid device */ 274 if (!dev || !(ch = device_get_softc(dev))) 275 return ENXIO; 276 277 /* wait for the channel to be IDLE or detached before suspending */ 278 while (ch->r_irq) { 279 mtx_lock(&ch->state_mtx); 280 if (ch->state == ATA_IDLE) { 281 ch->state = ATA_ACTIVE; 282 mtx_unlock(&ch->state_mtx); 283 break; 284 } 285 mtx_unlock(&ch->state_mtx); 286 tsleep(ch, PRIBIO, "atasusp", hz/10); 287 } 288 ATA_LOCKING(dev, ATA_LF_UNLOCK); 289 return 0; 290 } 291 292 int 293 ata_resume(device_t dev) 294 { 295 struct ata_channel *ch; 296 int error; 297 298 /* check for valid device */ 299 if (!dev || !(ch = device_get_softc(dev))) 300 return ENXIO; 301 302 /* reinit the devices, we dont know what mode/state they are in */ 303 error = ata_reinit(dev); 304 305 /* kick off requests on the queue */ 306 ata_start(dev); 307 return error; 308 } 309 310 int 311 ata_interrupt(void *data) 312 { 313 struct ata_channel *ch = (struct ata_channel *)data; 314 struct ata_request *request; 315 316 mtx_lock(&ch->state_mtx); 317 do { 318 /* ignore interrupt if its not for us */ 319 if (ch->hw.status && !ch->hw.status(ch->dev)) 320 break; 321 322 /* do we have a running request */ 323 if (!(request = ch->running)) 324 break; 325 326 ATA_DEBUG_RQ(request, "interrupt"); 327 328 /* safetycheck for the right state */ 329 if (ch->state == ATA_IDLE) { 330 device_printf(request->dev, "interrupt on idle channel ignored\n"); 331 break; 332 } 333 334 /* 335 * we have the HW locks, so end the transaction for this request 336 * if it finishes immediately otherwise wait for next interrupt 337 */ 338 if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) { 339 ch->running = NULL; 340 if (ch->state == ATA_ACTIVE) 341 ch->state = ATA_IDLE; 342 mtx_unlock(&ch->state_mtx); 343 ATA_LOCKING(ch->dev, ATA_LF_UNLOCK); 344 ata_finish(request); 345 return 1; 346 } 347 } while (0); 348 mtx_unlock(&ch->state_mtx); 349 return 0; 350 } 351 352 /* 353 * device related interfaces 354 */ 355 static int 356 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data, 357 int32_t flag, struct thread *td) 358 { 359 device_t device, *children; 360 struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data; 361 int *value = (int *)data; 362 int i, nchildren, error = ENOTTY; 363 364 switch (cmd) { 365 case IOCATAGMAXCHANNEL: 366 *value = devclass_get_maxunit(ata_devclass); 367 error = 0; 368 break; 369 370 case IOCATAREINIT: 371 if (*value > devclass_get_maxunit(ata_devclass) || 372 !(device = devclass_get_device(ata_devclass, *value))) 373 return ENXIO; 374 error = ata_reinit(device); 375 ata_start(device); 376 break; 377 378 case IOCATAATTACH: 379 if (*value > devclass_get_maxunit(ata_devclass) || 380 !(device = devclass_get_device(ata_devclass, *value))) 381 return ENXIO; 382 /* XXX SOS should enable channel HW on controller */ 383 error = ata_attach(device); 384 break; 385 386 case IOCATADETACH: 387 if (*value > devclass_get_maxunit(ata_devclass) || 388 !(device = devclass_get_device(ata_devclass, *value))) 389 return ENXIO; 390 error = ata_detach(device); 391 /* XXX SOS should disable channel HW on controller */ 392 break; 393 394 case IOCATADEVICES: 395 if (devices->channel > devclass_get_maxunit(ata_devclass) || 396 !(device = devclass_get_device(ata_devclass, devices->channel))) 397 return ENXIO; 398 bzero(devices->name[0], 32); 399 bzero(&devices->params[0], sizeof(struct ata_params)); 400 bzero(devices->name[1], 32); 401 bzero(&devices->params[1], sizeof(struct ata_params)); 402 if (!device_get_children(device, &children, &nchildren)) { 403 for (i = 0; i < nchildren; i++) { 404 if (children[i] && device_is_attached(children[i])) { 405 struct ata_device *atadev = device_get_softc(children[i]); 406 407 if (atadev->unit == ATA_MASTER) { 408 strncpy(devices->name[0], 409 device_get_nameunit(children[i]), 32); 410 bcopy(&atadev->param, &devices->params[0], 411 sizeof(struct ata_params)); 412 } 413 if (atadev->unit == ATA_SLAVE) { 414 strncpy(devices->name[1], 415 device_get_nameunit(children[i]), 32); 416 bcopy(&atadev->param, &devices->params[1], 417 sizeof(struct ata_params)); 418 } 419 } 420 } 421 free(children, M_TEMP); 422 error = 0; 423 } 424 else 425 error = ENODEV; 426 break; 427 428 default: 429 if (ata_raid_ioctl_func) 430 error = ata_raid_ioctl_func(cmd, data); 431 } 432 return error; 433 } 434 435 int 436 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data) 437 { 438 struct ata_device *atadev = device_get_softc(dev); 439 struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data; 440 struct ata_params *params = (struct ata_params *)data; 441 int *mode = (int *)data; 442 struct ata_request *request; 443 caddr_t buf; 444 int error; 445 446 switch (cmd) { 447 case IOCATAREQUEST: 448 if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) { 449 return ENOMEM; 450 } 451 if (!(request = ata_alloc_request())) { 452 free(buf, M_ATA); 453 return ENOMEM; 454 } 455 if (ioc_request->flags & ATA_CMD_WRITE) { 456 error = copyin(ioc_request->data, buf, ioc_request->count); 457 if (error) { 458 free(buf, M_ATA); 459 ata_free_request(request); 460 return error; 461 } 462 } 463 request->dev = dev; 464 if (ioc_request->flags & ATA_CMD_ATAPI) { 465 request->flags = ATA_R_ATAPI; 466 bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16); 467 } 468 else { 469 request->u.ata.command = ioc_request->u.ata.command; 470 request->u.ata.feature = ioc_request->u.ata.feature; 471 request->u.ata.lba = ioc_request->u.ata.lba; 472 request->u.ata.count = ioc_request->u.ata.count; 473 } 474 request->timeout = ioc_request->timeout; 475 request->data = buf; 476 request->bytecount = ioc_request->count; 477 request->transfersize = request->bytecount; 478 if (ioc_request->flags & ATA_CMD_CONTROL) 479 request->flags |= ATA_R_CONTROL; 480 if (ioc_request->flags & ATA_CMD_READ) 481 request->flags |= ATA_R_READ; 482 if (ioc_request->flags & ATA_CMD_WRITE) 483 request->flags |= ATA_R_WRITE; 484 ata_queue_request(request); 485 if (request->flags & ATA_R_ATAPI) { 486 bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense, 487 sizeof(struct atapi_sense)); 488 } 489 else { 490 ioc_request->u.ata.command = request->u.ata.command; 491 ioc_request->u.ata.feature = request->u.ata.feature; 492 ioc_request->u.ata.lba = request->u.ata.lba; 493 ioc_request->u.ata.count = request->u.ata.count; 494 } 495 ioc_request->error = request->result; 496 if (ioc_request->flags & ATA_CMD_READ) 497 error = copyout(buf, ioc_request->data, ioc_request->count); 498 else 499 error = 0; 500 free(buf, M_ATA); 501 ata_free_request(request); 502 return error; 503 504 case IOCATAGPARM: 505 ata_getparam(atadev, 0); 506 bcopy(&atadev->param, params, sizeof(struct ata_params)); 507 return 0; 508 509 case IOCATASMODE: 510 atadev->mode = *mode; 511 ATA_SETMODE(device_get_parent(dev), dev); 512 return 0; 513 514 case IOCATAGMODE: 515 *mode = atadev->mode; 516 return 0; 517 default: 518 return ENOTTY; 519 } 520 } 521 522 static void 523 ata_boot_attach(void) 524 { 525 struct ata_channel *ch; 526 int ctlr; 527 528 mtx_lock(&Giant); /* newbus suckage it needs Giant */ 529 530 /* kick of probe and attach on all channels */ 531 for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) { 532 if ((ch = devclass_get_softc(ata_devclass, ctlr))) { 533 ata_identify(ch->dev); 534 } 535 } 536 537 /* release the hook that got us here, we are only needed once during boot */ 538 if (ata_delayed_attach) { 539 config_intrhook_disestablish(ata_delayed_attach); 540 free(ata_delayed_attach, M_TEMP); 541 ata_delayed_attach = NULL; 542 } 543 544 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */ 545 } 546 547 548 /* 549 * misc support functions 550 */ 551 static device_t 552 ata_add_child(device_t parent, struct ata_device *atadev, int unit) 553 { 554 device_t child; 555 556 if ((child = device_add_child(parent, NULL, unit))) { 557 device_set_softc(child, atadev); 558 device_quiet(child); 559 atadev->dev = child; 560 atadev->max_iosize = DEV_BSIZE; 561 atadev->mode = ATA_PIO_MAX; 562 } 563 return child; 564 } 565 566 static int 567 ata_getparam(struct ata_device *atadev, int init) 568 { 569 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev)); 570 struct ata_request *request; 571 u_int8_t command = 0; 572 int error = ENOMEM, retries = 2; 573 574 if (ch->devices & 575 (atadev->unit == ATA_MASTER ? ATA_ATA_MASTER : ATA_ATA_SLAVE)) 576 command = ATA_ATA_IDENTIFY; 577 if (ch->devices & 578 (atadev->unit == ATA_MASTER ? ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE)) 579 command = ATA_ATAPI_IDENTIFY; 580 if (!command) 581 return ENXIO; 582 583 while (retries-- > 0 && error) { 584 if (!(request = ata_alloc_request())) 585 break; 586 request->dev = atadev->dev; 587 request->timeout = 1; 588 request->retries = 0; 589 request->u.ata.command = command; 590 request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT|ATA_R_QUIET); 591 request->data = (void *)&atadev->param; 592 request->bytecount = sizeof(struct ata_params); 593 request->donecount = 0; 594 request->transfersize = DEV_BSIZE; 595 ata_queue_request(request); 596 error = request->result; 597 ata_free_request(request); 598 } 599 600 if (!error && (isprint(atadev->param.model[0]) || 601 isprint(atadev->param.model[1]))) { 602 struct ata_params *atacap = &atadev->param; 603 char buffer[64]; 604 int16_t *ptr; 605 606 for (ptr = (int16_t *)atacap; 607 ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) { 608 *ptr = le16toh(*ptr); 609 } 610 if (!(!strncmp(atacap->model, "FX", 2) || 611 !strncmp(atacap->model, "NEC", 3) || 612 !strncmp(atacap->model, "Pioneer", 7) || 613 !strncmp(atacap->model, "SHARP", 5))) { 614 bswap(atacap->model, sizeof(atacap->model)); 615 bswap(atacap->revision, sizeof(atacap->revision)); 616 bswap(atacap->serial, sizeof(atacap->serial)); 617 } 618 btrim(atacap->model, sizeof(atacap->model)); 619 bpack(atacap->model, atacap->model, sizeof(atacap->model)); 620 btrim(atacap->revision, sizeof(atacap->revision)); 621 bpack(atacap->revision, atacap->revision, sizeof(atacap->revision)); 622 btrim(atacap->serial, sizeof(atacap->serial)); 623 bpack(atacap->serial, atacap->serial, sizeof(atacap->serial)); 624 625 if (bootverbose) 626 printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n", 627 device_get_unit(ch->dev), 628 atadev->unit == ATA_MASTER ? "master" : "slave", 629 ata_mode2str(ata_pmode(atacap)), 630 ata_mode2str(ata_wmode(atacap)), 631 ata_mode2str(ata_umode(atacap)), 632 (atacap->hwres & ATA_CABLE_ID) ? "80":"40"); 633 634 if (init) { 635 sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision); 636 device_set_desc_copy(atadev->dev, buffer); 637 if ((atadev->param.config & ATA_PROTO_ATAPI) && 638 (atadev->param.config != ATA_CFA_MAGIC1) && 639 (atadev->param.config != ATA_CFA_MAGIC2)) { 640 if (atapi_dma && ch->dma && 641 (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR && 642 ata_umode(&atadev->param) >= ATA_UDMA2) 643 atadev->mode = ATA_DMA_MAX; 644 } 645 else { 646 if (ata_dma && ch->dma && 647 (ata_umode(&atadev->param) > 0 || 648 ata_wmode(&atadev->param) > 0)) 649 atadev->mode = ATA_DMA_MAX; 650 } 651 } 652 } 653 else { 654 if (!error) 655 error = ENXIO; 656 } 657 return error; 658 } 659 660 int 661 ata_identify(device_t dev) 662 { 663 struct ata_channel *ch = device_get_softc(dev); 664 struct ata_device *master = NULL, *slave = NULL; 665 device_t master_child = NULL, slave_child = NULL; 666 int master_unit = -1, slave_unit = -1; 667 668 if (ch->devices & (ATA_ATA_MASTER | ATA_ATAPI_MASTER)) { 669 if (!(master = malloc(sizeof(struct ata_device), 670 M_ATA, M_NOWAIT | M_ZERO))) { 671 device_printf(dev, "out of memory\n"); 672 return ENOMEM; 673 } 674 master->unit = ATA_MASTER; 675 } 676 if (ch->devices & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE)) { 677 if (!(slave = malloc(sizeof(struct ata_device), 678 M_ATA, M_NOWAIT | M_ZERO))) { 679 free(master, M_ATA); 680 device_printf(dev, "out of memory\n"); 681 return ENOMEM; 682 } 683 slave->unit = ATA_SLAVE; 684 } 685 686 #ifdef ATA_STATIC_ID 687 if (ch->devices & ATA_ATA_MASTER) 688 master_unit = (device_get_unit(dev) << 1); 689 #endif 690 if (master && !(master_child = ata_add_child(dev, master, master_unit))) { 691 free(master, M_ATA); 692 master = NULL; 693 } 694 #ifdef ATA_STATIC_ID 695 if (ch->devices & ATA_ATA_SLAVE) 696 slave_unit = (device_get_unit(dev) << 1) + 1; 697 #endif 698 if (slave && !(slave_child = ata_add_child(dev, slave, slave_unit))) { 699 free(slave, M_ATA); 700 slave = NULL; 701 } 702 703 if (slave && ata_getparam(slave, 1)) { 704 device_delete_child(dev, slave_child); 705 free(slave, M_ATA); 706 } 707 if (master && ata_getparam(master, 1)) { 708 device_delete_child(dev, master_child); 709 free(master, M_ATA); 710 } 711 712 bus_generic_probe(dev); 713 bus_generic_attach(dev); 714 return 0; 715 } 716 717 void 718 ata_default_registers(device_t dev) 719 { 720 struct ata_channel *ch = device_get_softc(dev); 721 722 /* fill in the defaults from whats setup already */ 723 ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res; 724 ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset; 725 ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res; 726 ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset; 727 ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res; 728 ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset; 729 ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res; 730 ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset; 731 } 732 733 void 734 ata_modify_if_48bit(struct ata_request *request) 735 { 736 struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); 737 struct ata_device *atadev = device_get_softc(request->dev); 738 739 atadev->flags &= ~ATA_D_48BIT_ACTIVE; 740 741 if ((request->u.ata.lba >= ATA_MAX_28BIT_LBA || 742 request->u.ata.count > 256) && 743 atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) { 744 745 /* translate command into 48bit version */ 746 switch (request->u.ata.command) { 747 case ATA_READ: 748 request->u.ata.command = ATA_READ48; 749 break; 750 case ATA_READ_MUL: 751 request->u.ata.command = ATA_READ_MUL48; 752 break; 753 case ATA_READ_DMA: 754 if (ch->flags & ATA_NO_48BIT_DMA) { 755 if (request->transfersize > DEV_BSIZE) 756 request->u.ata.command = ATA_READ_MUL48; 757 else 758 request->u.ata.command = ATA_READ48; 759 request->flags &= ~ATA_R_DMA; 760 } 761 else 762 request->u.ata.command = ATA_READ_DMA48; 763 break; 764 case ATA_READ_DMA_QUEUED: 765 if (ch->flags & ATA_NO_48BIT_DMA) { 766 if (request->transfersize > DEV_BSIZE) 767 request->u.ata.command = ATA_READ_MUL48; 768 else 769 request->u.ata.command = ATA_READ48; 770 request->flags &= ~ATA_R_DMA; 771 } 772 else 773 request->u.ata.command = ATA_READ_DMA_QUEUED48; 774 break; 775 case ATA_WRITE: 776 request->u.ata.command = ATA_WRITE48; 777 break; 778 case ATA_WRITE_MUL: 779 request->u.ata.command = ATA_WRITE_MUL48; 780 break; 781 case ATA_WRITE_DMA: 782 if (ch->flags & ATA_NO_48BIT_DMA) { 783 if (request->transfersize > DEV_BSIZE) 784 request->u.ata.command = ATA_WRITE_MUL48; 785 else 786 request->u.ata.command = ATA_WRITE48; 787 request->flags &= ~ATA_R_DMA; 788 } 789 else 790 request->u.ata.command = ATA_WRITE_DMA48; 791 break; 792 case ATA_WRITE_DMA_QUEUED: 793 if (ch->flags & ATA_NO_48BIT_DMA) { 794 if (request->transfersize > DEV_BSIZE) 795 request->u.ata.command = ATA_WRITE_MUL48; 796 else 797 request->u.ata.command = ATA_WRITE48; 798 request->u.ata.command = ATA_WRITE48; 799 request->flags &= ~ATA_R_DMA; 800 } 801 else 802 request->u.ata.command = ATA_WRITE_DMA_QUEUED48; 803 break; 804 case ATA_FLUSHCACHE: 805 request->u.ata.command = ATA_FLUSHCACHE48; 806 break; 807 case ATA_READ_NATIVE_MAX_ADDDRESS: 808 request->u.ata.command = ATA_READ_NATIVE_MAX_ADDDRESS48; 809 break; 810 case ATA_SET_MAX_ADDRESS: 811 request->u.ata.command = ATA_SET_MAX_ADDRESS48; 812 break; 813 default: 814 return; 815 } 816 atadev->flags |= ATA_D_48BIT_ACTIVE; 817 } 818 } 819 820 void 821 ata_udelay(int interval) 822 { 823 /* for now just use DELAY, the timer/sleep subsytems are not there yet */ 824 if (1 || interval < (1000000/hz) || ata_delayed_attach) 825 DELAY(interval); 826 else 827 pause("ataslp", interval/(1000000/hz)); 828 } 829 830 char * 831 ata_mode2str(int mode) 832 { 833 switch (mode) { 834 case -1: return "UNSUPPORTED"; 835 case ATA_PIO0: return "PIO0"; 836 case ATA_PIO1: return "PIO1"; 837 case ATA_PIO2: return "PIO2"; 838 case ATA_PIO3: return "PIO3"; 839 case ATA_PIO4: return "PIO4"; 840 case ATA_WDMA0: return "WDMA0"; 841 case ATA_WDMA1: return "WDMA1"; 842 case ATA_WDMA2: return "WDMA2"; 843 case ATA_UDMA0: return "UDMA16"; 844 case ATA_UDMA1: return "UDMA25"; 845 case ATA_UDMA2: return "UDMA33"; 846 case ATA_UDMA3: return "UDMA40"; 847 case ATA_UDMA4: return "UDMA66"; 848 case ATA_UDMA5: return "UDMA100"; 849 case ATA_UDMA6: return "UDMA133"; 850 case ATA_SA150: return "SATA150"; 851 case ATA_SA300: return "SATA300"; 852 case ATA_USB: return "USB"; 853 case ATA_USB1: return "USB1"; 854 case ATA_USB2: return "USB2"; 855 default: 856 if (mode & ATA_DMA_MASK) 857 return "BIOSDMA"; 858 else 859 return "BIOSPIO"; 860 } 861 } 862 863 int 864 ata_pmode(struct ata_params *ap) 865 { 866 if (ap->atavalid & ATA_FLAG_64_70) { 867 if (ap->apiomodes & 0x02) 868 return ATA_PIO4; 869 if (ap->apiomodes & 0x01) 870 return ATA_PIO3; 871 } 872 if (ap->mwdmamodes & 0x04) 873 return ATA_PIO4; 874 if (ap->mwdmamodes & 0x02) 875 return ATA_PIO3; 876 if (ap->mwdmamodes & 0x01) 877 return ATA_PIO2; 878 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200) 879 return ATA_PIO2; 880 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100) 881 return ATA_PIO1; 882 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000) 883 return ATA_PIO0; 884 return ATA_PIO0; 885 } 886 887 int 888 ata_wmode(struct ata_params *ap) 889 { 890 if (ap->mwdmamodes & 0x04) 891 return ATA_WDMA2; 892 if (ap->mwdmamodes & 0x02) 893 return ATA_WDMA1; 894 if (ap->mwdmamodes & 0x01) 895 return ATA_WDMA0; 896 return -1; 897 } 898 899 int 900 ata_umode(struct ata_params *ap) 901 { 902 if (ap->atavalid & ATA_FLAG_88) { 903 if (ap->udmamodes & 0x40) 904 return ATA_UDMA6; 905 if (ap->udmamodes & 0x20) 906 return ATA_UDMA5; 907 if (ap->udmamodes & 0x10) 908 return ATA_UDMA4; 909 if (ap->udmamodes & 0x08) 910 return ATA_UDMA3; 911 if (ap->udmamodes & 0x04) 912 return ATA_UDMA2; 913 if (ap->udmamodes & 0x02) 914 return ATA_UDMA1; 915 if (ap->udmamodes & 0x01) 916 return ATA_UDMA0; 917 } 918 return -1; 919 } 920 921 int 922 ata_limit_mode(device_t dev, int mode, int maxmode) 923 { 924 struct ata_device *atadev = device_get_softc(dev); 925 926 if (maxmode && mode > maxmode) 927 mode = maxmode; 928 929 if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0) 930 return min(mode, ata_umode(&atadev->param)); 931 932 if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0) 933 return min(mode, ata_wmode(&atadev->param)); 934 935 if (mode > ata_pmode(&atadev->param)) 936 return min(mode, ata_pmode(&atadev->param)); 937 938 return mode; 939 } 940 941 static void 942 bswap(int8_t *buf, int len) 943 { 944 u_int16_t *ptr = (u_int16_t*)(buf + len); 945 946 while (--ptr >= (u_int16_t*)buf) 947 *ptr = ntohs(*ptr); 948 } 949 950 static void 951 btrim(int8_t *buf, int len) 952 { 953 int8_t *ptr; 954 955 for (ptr = buf; ptr < buf+len; ++ptr) 956 if (!*ptr || *ptr == '_') 957 *ptr = ' '; 958 for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr) 959 *ptr = 0; 960 } 961 962 static void 963 bpack(int8_t *src, int8_t *dst, int len) 964 { 965 int i, j, blank; 966 967 for (i = j = blank = 0 ; i < len; i++) { 968 if (blank && src[i] == ' ') continue; 969 if (blank && src[i] != ' ') { 970 dst[j++] = src[i]; 971 blank = 0; 972 continue; 973 } 974 if (src[i] == ' ') { 975 blank = 1; 976 if (i == 0) 977 continue; 978 } 979 dst[j++] = src[i]; 980 } 981 if (j < len) 982 dst[j] = 0x00; 983 } 984 985 986 /* 987 * module handeling 988 */ 989 static int 990 ata_module_event_handler(module_t mod, int what, void *arg) 991 { 992 static struct cdev *atacdev; 993 994 switch (what) { 995 case MOD_LOAD: 996 /* register controlling device */ 997 atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata"); 998 999 if (cold) { 1000 /* register boot attach to be run when interrupts are enabled */ 1001 if (!(ata_delayed_attach = (struct intr_config_hook *) 1002 malloc(sizeof(struct intr_config_hook), 1003 M_TEMP, M_NOWAIT | M_ZERO))) { 1004 printf("ata: malloc of delayed attach hook failed\n"); 1005 return EIO; 1006 } 1007 ata_delayed_attach->ich_func = (void*)ata_boot_attach; 1008 if (config_intrhook_establish(ata_delayed_attach) != 0) { 1009 printf("ata: config_intrhook_establish failed\n"); 1010 free(ata_delayed_attach, M_TEMP); 1011 } 1012 } 1013 return 0; 1014 1015 case MOD_UNLOAD: 1016 /* deregister controlling device */ 1017 destroy_dev(atacdev); 1018 return 0; 1019 1020 default: 1021 return EOPNOTSUPP; 1022 } 1023 } 1024 1025 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL }; 1026 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); 1027 MODULE_VERSION(ata, 1); 1028 1029 static void 1030 ata_init(void) 1031 { 1032 ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request), 1033 NULL, NULL, NULL, NULL, 0, 0); 1034 ata_composite_zone = uma_zcreate("ata_composite", 1035 sizeof(struct ata_composite), 1036 NULL, NULL, NULL, NULL, 0, 0); 1037 } 1038 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL); 1039 1040 static void 1041 ata_uninit(void) 1042 { 1043 uma_zdestroy(ata_composite_zone); 1044 uma_zdestroy(ata_request_zone); 1045 } 1046 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL); 1047