/*-
 * Copyright (c) 1998 - 2006 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ata.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/ctype.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#ifdef __alpha__
#include <machine/md_var.h>
#endif
#include <dev/ata/ata-all.h>
#include <ata_if.h>

/* device structure */
static d_ioctl_t ata_ioctl;
static struct cdevsw ata_cdevsw = {
    .d_version = D_VERSION,
    .d_flags =   D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
    .d_ioctl =   ata_ioctl,
    .d_name =    "ata",
};

/* prototypes */
static void ata_boot_attach(void);
static device_t ata_add_child(device_t, struct ata_device *, int);
static int ata_getparam(struct ata_device *, int);
static void bswap(int8_t *, int);
static void btrim(int8_t *, int);
static void bpack(int8_t *, int8_t *, int);

/* global vars */
MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
struct intr_config_hook *ata_delayed_attach = NULL;
devclass_t ata_devclass;
uma_zone_t ata_request_zone;
uma_zone_t ata_composite_zone;
int ata_wc = 1;

/* local vars */
static int ata_dma = 1;
static int atapi_dma = 1;

/* sysctl vars */
SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
TUNABLE_INT("hw.ata.ata_dma", &ata_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0,
           "ATA disk DMA mode control");
TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma);
SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0,
           "ATAPI device DMA mode control");
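/*
 * hw.ata.wc below controls write caching on ATA disks; like the DMA knobs
 * above it is a boot-time tunable (CTLFLAG_RDTUN) set from loader.conf and
 * cannot be changed on a running system.
 */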
TUNABLE_INT("hw.ata.wc", &ata_wc);
SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0,
           "ATA disk write caching");

/*
 * newbus device interface related functions
 */
int
ata_probe(device_t dev)
{
    return 0;
}

int
ata_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error, rid;

    /* check that we have a virgin channel to attach */
    if (ch->r_irq)
        return EEXIST;

    /* initialize the softc basics */
    ch->dev = dev;
    ch->state = ATA_IDLE;
    bzero(&ch->state_mtx, sizeof(struct mtx));
    mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
    bzero(&ch->queue_mtx, sizeof(struct mtx));
    mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF);
    TAILQ_INIT(&ch->ata_queue);

    /* reset the controller HW, the channel and device(s) */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
        tsleep(&error, PRIBIO, "ataatch", 1);
    ATA_RESET(dev);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    /* setup interrupt delivery */
    rid = ATA_IRQ_RID;
    ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
                                       RF_SHAREABLE | RF_ACTIVE);
    if (!ch->r_irq) {
        device_printf(dev, "unable to allocate interrupt\n");
        return ENXIO;
    }
    if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS,
                                (driver_intr_t *)ata_interrupt, ch, &ch->ih))) {
        device_printf(dev, "unable to setup interrupt\n");
        return error;
    }

    /* probe and attach devices on this channel unless we are in early boot */
    if (!ata_delayed_attach)
        ata_identify(dev);
    return 0;
}

int
ata_detach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    device_t *children;
    int nchildren, i;

    /* check that we have a valid channel to detach */
    if (!ch->r_irq)
        return ENXIO;

    /* grab the channel lock so no new requests get launched */
    mtx_lock(&ch->state_mtx);
    ch->state |= ATA_STALL_QUEUE;
    mtx_unlock(&ch->state_mtx);

    /* detach & delete all children */
    if (!device_get_children(dev, &children, &nchildren)) {
        for (i = 0; i < nchildren; i++)
            if (children[i])
                device_delete_child(dev, children[i]);
        free(children, M_TEMP);
    }

    /* release resources */
    bus_teardown_intr(dev, ch->r_irq, ch->ih);
    bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
    ch->r_irq = NULL;
    mtx_destroy(&ch->state_mtx);
    mtx_destroy(&ch->queue_mtx);
    return 0;
}

int
ata_reinit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;
    device_t *children;
    int nchildren, i;

    /* check that we have a valid channel to reinit */
    if (!ch || !ch->r_irq)
        return ENXIO;

    if (bootverbose)
        device_printf(dev, "reiniting channel ..\n");

    /* poll for locking the channel */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
        tsleep(&dev, PRIBIO, "atarini", 1);

    /* catch any request currently in ch->running */
    mtx_lock(&ch->state_mtx);
    if ((request = ch->running))
        callout_stop(&request->callout);
    ch->running = NULL;

    /* unconditionally grab the channel lock */
    ch->state |= ATA_STALL_QUEUE;
    mtx_unlock(&ch->state_mtx);

    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);

    /* reinit the children and delete any that fail */
    if (!device_get_children(dev, &children, &nchildren)) {
        mtx_lock(&Giant);     /* newbus suckage it needs Giant */
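        /*
         * reprobe every attached child below; a child whose ATA_REINIT
         * fails has gone missing and is deleted, and any running request
         * for it is failed with ENXIO
         */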
        for (i = 0; i < nchildren; i++) {
            /* did any children go missing? */
            if (children[i] && device_is_attached(children[i]) &&
                ATA_REINIT(children[i])) {
                /*
                 * if we had a running request and its device matches
                 * this child we need to inform the request that the
                 * device is gone.
                 */
                if (request && request->dev == children[i]) {
                    request->result = ENXIO;
                    device_printf(request->dev, "FAILURE - device detached\n");

                    /* if not timeout finish request here */
                    if (!(request->flags & ATA_R_TIMEOUT))
                        ata_finish(request);
                    request = NULL;
                }
                device_delete_child(dev, children[i]);
            }
        }
        free(children, M_TEMP);
        mtx_unlock(&Giant);   /* newbus suckage dealt with, release Giant */
    }

    /* if we still have a good request put it on the queue again */
    if (request && !(request->flags & ATA_R_TIMEOUT)) {
        device_printf(request->dev,
                      "WARNING - %s requeued due to channel reset",
                      ata_cmd2str(request));
        if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
            printf(" LBA=%ju", request->u.ata.lba);
        printf("\n");
        request->flags |= ATA_R_REQUEUE;
        ata_queue_request(request);
    }

    /* we're done, release the channel for new work */
    mtx_lock(&ch->state_mtx);
    ch->state = ATA_IDLE;
    mtx_unlock(&ch->state_mtx);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    if (bootverbose)
        device_printf(dev, "reinit done ..\n");

    /* kick off requests on the queue */
    ata_start(dev);
    return 0;
}

int
ata_suspend(device_t dev)
{
    struct ata_channel *ch;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
        return ENXIO;

    /* wait for the channel to be IDLE before entering suspend mode */
    while (1) {
        mtx_lock(&ch->state_mtx);
        if (ch->state == ATA_IDLE) {
            ch->state = ATA_ACTIVE;
            mtx_unlock(&ch->state_mtx);
            break;
        }
        mtx_unlock(&ch->state_mtx);
        tsleep(ch, PRIBIO, "atasusp", hz/10);
    }
    ATA_LOCKING(dev, ATA_LF_UNLOCK);
    return 0;
}

int
ata_resume(device_t dev)
{
    struct ata_channel *ch;
    int error;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
        return ENXIO;

    /* reinit the devices, we don't know what mode/state they are in */
    error = ata_reinit(dev);

    /* kick off requests on the queue */
    ata_start(dev);
    return error;
}

int
ata_interrupt(void *data)
{
    struct ata_channel *ch = (struct ata_channel *)data;
    struct ata_request *request;

    mtx_lock(&ch->state_mtx);
    do {
        /* ignore the interrupt if it's not for us */
        if (ch->hw.status && !ch->hw.status(ch->dev))
            break;

        /* do we have a running request */
        if (!(request = ch->running))
            break;

        ATA_DEBUG_RQ(request, "interrupt");

        /* safety check for the right state */
        if (ch->state == ATA_IDLE) {
            device_printf(request->dev, "interrupt on idle channel ignored\n");
            break;
        }

        /*
         * we have the HW locks, so end the transaction for this request
         * if it finishes immediately, otherwise wait for the next interrupt
         */
        if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
            ch->running = NULL;
            if (ch->state == ATA_ACTIVE)
                ch->state = ATA_IDLE;
            mtx_unlock(&ch->state_mtx);
            ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
            ata_finish(request);
            return 1;
        }
    } while (0);
    mtx_unlock(&ch->state_mtx);
    return 0;
}

/*
 * device related interfaces
 */
static int
ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
          int32_t flag, struct thread *td)
{
    device_t device, *children;
    struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
    int *value = (int *)data;
    int i, nchildren, error = ENOTTY;

    switch (cmd) {
    case IOCATAGMAXCHANNEL:
        *value = devclass_get_maxunit(ata_devclass);
        error = 0;
        break;

    case IOCATAREINIT:
        if (*value > devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)))
            return ENXIO;
        error = ata_reinit(device);
        ata_start(device);
        break;

    case IOCATAATTACH:
        if (*value > devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)))
            return ENXIO;
        /* XXX SOS should enable channel HW on controller */
        error = ata_attach(device);
        break;

    case IOCATADETACH:
        if (*value > devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, *value)))
            return ENXIO;
        error = ata_detach(device);
        /* XXX SOS should disable channel HW on controller */
        break;

    case IOCATADEVICES:
        if (devices->channel > devclass_get_maxunit(ata_devclass) ||
            !(device = devclass_get_device(ata_devclass, devices->channel)))
            return ENXIO;
        bzero(devices->name[0], 32);
        bzero(&devices->params[0], sizeof(struct ata_params));
        bzero(devices->name[1], 32);
        bzero(&devices->params[1], sizeof(struct ata_params));
        if (!device_get_children(device, &children, &nchildren)) {
            for (i = 0; i < nchildren; i++) {
                if (children[i] && device_is_attached(children[i])) {
                    struct ata_device *atadev = device_get_softc(children[i]);

                    if (atadev->unit == ATA_MASTER) {
                        strncpy(devices->name[0],
                                device_get_nameunit(children[i]), 32);
                        bcopy(&atadev->param, &devices->params[0],
                              sizeof(struct ata_params));
                    }
                    if (atadev->unit == ATA_SLAVE) {
                        strncpy(devices->name[1],
                                device_get_nameunit(children[i]), 32);
                        bcopy(&atadev->param, &devices->params[1],
                              sizeof(struct ata_params));
                    }
                }
            }
            free(children, M_TEMP);
            error = 0;
        }
        else
            error = ENODEV;
        break;

    default:
        if (ata_raid_ioctl_func)
            error = ata_raid_ioctl_func(cmd, data);
    }
    return error;
}

int
ata_device_ioctl(device_t dev, u_long cmd, caddr_t data)
{
    struct ata_device *atadev = device_get_softc(dev);
    struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data;
    struct ata_params *params = (struct ata_params *)data;
    int *mode = (int *)data;
    struct ata_request *request;
    caddr_t buf;
    int error;

    switch (cmd) {
    case IOCATAREQUEST:
        if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
            return ENOMEM;
        }
        if (!(request = ata_alloc_request())) {
            free(buf, M_ATA);
            return ENOMEM;
        }
        if (ioc_request->flags & ATA_CMD_WRITE) {
            error = copyin(ioc_request->data, buf, ioc_request->count);
            if (error) {
                free(buf, M_ATA);
                ata_free_request(request);
                return error;
            }
        }
        request->dev = dev;
        if (ioc_request->flags & ATA_CMD_ATAPI) {
            request->flags = ATA_R_ATAPI;
            bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
        }
        else {
            request->u.ata.command = ioc_request->u.ata.command;
            request->u.ata.feature = ioc_request->u.ata.feature;
            request->u.ata.lba = ioc_request->u.ata.lba;
            request->u.ata.count = ioc_request->u.ata.count;
        }
        request->timeout = ioc_request->timeout;
        request->data = buf;
        request->bytecount = ioc_request->count;
        request->transfersize = request->bytecount;
        if (ioc_request->flags & ATA_CMD_CONTROL)
            request->flags |= ATA_R_CONTROL;
        if (ioc_request->flags & ATA_CMD_READ)
            request->flags |= ATA_R_READ;
        if (ioc_request->flags & ATA_CMD_WRITE)
            request->flags |= ATA_R_WRITE;
        ata_queue_request(request);
        if (request->flags & ATA_R_ATAPI) {
            bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
                  sizeof(struct atapi_sense));
        }
        else {
            ioc_request->u.ata.command = request->u.ata.command;
            ioc_request->u.ata.feature = request->u.ata.feature;
            ioc_request->u.ata.lba = request->u.ata.lba;
            ioc_request->u.ata.count = request->u.ata.count;
        }
        ioc_request->error = request->result;
        if (ioc_request->flags & ATA_CMD_READ)
            error = copyout(buf, ioc_request->data, ioc_request->count);
        else
            error = 0;
        free(buf, M_ATA);
        ata_free_request(request);
        return error;

    case IOCATAGPARM:
        ata_getparam(atadev, 0);
        bcopy(&atadev->param, params, sizeof(struct ata_params));
        return 0;

    case IOCATASMODE:
        atadev->mode = *mode;
        ATA_SETMODE(device_get_parent(dev), dev);
        return 0;

    case IOCATAGMODE:
        *mode = atadev->mode;
        return 0;
    default:
        return ENOTTY;
    }
}

static void
ata_boot_attach(void)
{
    struct ata_channel *ch;
    int ctlr;

    mtx_lock(&Giant);     /* newbus suckage it needs Giant */

    /* kick off probe and attach on all channels */
    for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
        if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
            ata_identify(ch->dev);
        }
    }

    /* release the hook that got us here, we are only needed once during boot */
    if (ata_delayed_attach) {
        config_intrhook_disestablish(ata_delayed_attach);
        free(ata_delayed_attach, M_TEMP);
        ata_delayed_attach = NULL;
    }

    mtx_unlock(&Giant);   /* newbus suckage dealt with, release Giant */
}


/*
 * misc support functions
 */
static device_t
ata_add_child(device_t parent, struct ata_device *atadev, int unit)
{
    device_t child;

    if ((child = device_add_child(parent, NULL, unit))) {
        device_set_softc(child, atadev);
        device_quiet(child);
        atadev->dev = child;
        atadev->max_iosize = DEV_BSIZE;
        atadev->mode = ATA_PIO_MAX;
    }
    return child;
}

static int
ata_getparam(struct ata_device *atadev, int init)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
    struct ata_request *request;
    u_int8_t command = 0;
    int error = ENOMEM, retries = 2;

    if (ch->devices &
        (atadev->unit == ATA_MASTER ? ATA_ATA_MASTER : ATA_ATA_SLAVE))
        command = ATA_ATA_IDENTIFY;
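    /* an ATAPI device on this unit takes precedence, use ATAPI IDENTIFY */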
    if (ch->devices &
        (atadev->unit == ATA_MASTER ? ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE))
        command = ATA_ATAPI_IDENTIFY;
    if (!command)
        return ENXIO;

    while (retries-- > 0 && error) {
        if (!(request = ata_alloc_request()))
            break;
        request->dev = atadev->dev;
        request->timeout = 1;
        request->retries = 0;
        request->u.ata.command = command;
        request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT|ATA_R_QUIET);
        request->data = (void *)&atadev->param;
        request->bytecount = sizeof(struct ata_params);
        request->donecount = 0;
        request->transfersize = DEV_BSIZE;
        ata_queue_request(request);
        error = request->result;
        ata_free_request(request);
    }

    if (!error && (isprint(atadev->param.model[0]) ||
                   isprint(atadev->param.model[1]))) {
        struct ata_params *atacap = &atadev->param;
        char buffer[64];
#if BYTE_ORDER == BIG_ENDIAN
        int16_t *ptr;

        for (ptr = (int16_t *)atacap;
             ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) {
            *ptr = bswap16(*ptr);
        }
#endif
        if (!(!strncmp(atacap->model, "FX", 2) ||
              !strncmp(atacap->model, "NEC", 3) ||
              !strncmp(atacap->model, "Pioneer", 7) ||
              !strncmp(atacap->model, "SHARP", 5))) {
            bswap(atacap->model, sizeof(atacap->model));
            bswap(atacap->revision, sizeof(atacap->revision));
            bswap(atacap->serial, sizeof(atacap->serial));
        }
        btrim(atacap->model, sizeof(atacap->model));
        bpack(atacap->model, atacap->model, sizeof(atacap->model));
        btrim(atacap->revision, sizeof(atacap->revision));
        bpack(atacap->revision, atacap->revision, sizeof(atacap->revision));
        btrim(atacap->serial, sizeof(atacap->serial));
        bpack(atacap->serial, atacap->serial, sizeof(atacap->serial));

        if (bootverbose)
            printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n",
                   device_get_unit(ch->dev),
                   atadev->unit == ATA_MASTER ? "master" : "slave",
                   ata_mode2str(ata_pmode(atacap)),
                   ata_mode2str(ata_wmode(atacap)),
                   ata_mode2str(ata_umode(atacap)),
                   (atacap->hwres & ATA_CABLE_ID) ? "80" : "40");

        /*
         * on the initial call (init != 0) set the device description and
         * default to DMA when both the controller and the device support it
         */
        if (init) {
            sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision);
            device_set_desc_copy(atadev->dev, buffer);
            if (atadev->param.config & ATA_PROTO_ATAPI) {
                if (atapi_dma && ch->dma &&
                    (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR &&
                    ata_umode(&atadev->param) >= ATA_UDMA2)
                    atadev->mode = ATA_DMA_MAX;
            }
            else {
                if (ata_dma && ch->dma &&
                    (ata_umode(&atadev->param) > 0 ||
                     ata_wmode(&atadev->param) > 0))
                    atadev->mode = ATA_DMA_MAX;
            }
        }
    }
    else {
        if (!error)
            error = ENXIO;
    }
    return error;
}

int
ata_identify(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_device *master = NULL, *slave = NULL;
    device_t master_child = NULL, slave_child = NULL;
    int master_unit = -1, slave_unit = -1;

    if (ch->devices & (ATA_ATA_MASTER | ATA_ATAPI_MASTER)) {
        if (!(master = malloc(sizeof(struct ata_device),
                              M_ATA, M_NOWAIT | M_ZERO))) {
            device_printf(dev, "out of memory\n");
            return ENOMEM;
        }
        master->unit = ATA_MASTER;
    }
    if (ch->devices & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE)) {
        if (!(slave = malloc(sizeof(struct ata_device),
                             M_ATA, M_NOWAIT | M_ZERO))) {
            free(master, M_ATA);
            device_printf(dev, "out of memory\n");
            return ENOMEM;
        }
        slave->unit = ATA_SLAVE;
    }

#ifdef ATA_STATIC_ID
    if (ch->devices & ATA_ATA_MASTER)
        master_unit = (device_get_unit(dev) << 1);
#endif
    if (master && !(master_child = ata_add_child(dev, master, master_unit))) {
        free(master, M_ATA);
        master = NULL;
    }
#ifdef ATA_STATIC_ID
    if (ch->devices & ATA_ATA_SLAVE)
        slave_unit = (device_get_unit(dev) << 1) + 1;
#endif
    if (slave && !(slave_child = ata_add_child(dev, slave, slave_unit))) {
        free(slave, M_ATA);
        slave = NULL;
    }

    if (slave && ata_getparam(slave, 1)) {
        device_delete_child(dev, slave_child);
        free(slave, M_ATA);
    }
    if (master && ata_getparam(master, 1)) {
        device_delete_child(dev, master_child);
        free(master, M_ATA);
    }

    bus_generic_probe(dev);
    bus_generic_attach(dev);
    return 0;
}

void
ata_default_registers(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* fill in the defaults from what is already set up */
    ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
    ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
    ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
    ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
    ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
    ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
    ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
    ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}

void
ata_modify_if_48bit(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(request->dev));
    struct ata_device *atadev = device_get_softc(request->dev);

    atadev->flags &= ~ATA_D_48BIT_ACTIVE;

    if ((request->u.ata.lba >= ATA_MAX_28BIT_LBA ||
         request->u.ata.count > 256) &&
        atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

        /* translate command into 48bit version */
        switch (request->u.ata.command) {
        case ATA_READ:
            request->u.ata.command = ATA_READ48;
            break;
        case ATA_READ_MUL:
            request->u.ata.command = ATA_READ_MUL48;
            break;
        /*
         * for the DMA commands fall back to the equivalent PIO command
         * (and clear ATA_R_DMA) if the controller cannot do 48bit DMA
         */
        case ATA_READ_DMA:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_READ_MUL48;
                else
                    request->u.ata.command = ATA_READ48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_READ_DMA48;
            break;
        case ATA_READ_DMA_QUEUED:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_READ_MUL48;
                else
                    request->u.ata.command = ATA_READ48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_READ_DMA_QUEUED48;
            break;
        case ATA_WRITE:
            request->u.ata.command = ATA_WRITE48;
            break;
        case ATA_WRITE_MUL:
            request->u.ata.command = ATA_WRITE_MUL48;
            break;
        case ATA_WRITE_DMA:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_WRITE_MUL48;
                else
                    request->u.ata.command = ATA_WRITE48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_WRITE_DMA48;
            break;
        case ATA_WRITE_DMA_QUEUED:
            if (ch->flags & ATA_NO_48BIT_DMA) {
                if (request->transfersize > DEV_BSIZE)
                    request->u.ata.command = ATA_WRITE_MUL48;
                else
                    request->u.ata.command = ATA_WRITE48;
                request->flags &= ~ATA_R_DMA;
            }
            else
                request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
            break;
        case ATA_FLUSHCACHE:
            request->u.ata.command = ATA_FLUSHCACHE48;
            break;
        case ATA_READ_NATIVE_MAX_ADDDRESS:
            request->u.ata.command = ATA_READ_NATIVE_MAX_ADDDRESS48;
            break;
        case ATA_SET_MAX_ADDRESS:
            request->u.ata.command = ATA_SET_MAX_ADDRESS48;
            break;
        default:
            return;
        }
        atadev->flags |= ATA_D_48BIT_ACTIVE;
    }
}

void
ata_udelay(int interval)
{
    /* for now just use DELAY, the timer/sleep subsystems are not there yet */
    if (1 || interval < (1000000/hz) || ata_delayed_attach)
        DELAY(interval);
    else
        tsleep(&interval, PRIBIO, "ataslp", interval/(1000000/hz));
}

char *
ata_mode2str(int mode)
{
    switch (mode) {
    case -1: return "UNSUPPORTED";
    case ATA_PIO0: return "PIO0";
    case ATA_PIO1: return "PIO1";
    case ATA_PIO2: return "PIO2";
    case ATA_PIO3: return "PIO3";
    case ATA_PIO4: return "PIO4";
    case ATA_WDMA0: return "WDMA0";
    case ATA_WDMA1: return "WDMA1";
    case ATA_WDMA2: return "WDMA2";
    case ATA_UDMA0: return "UDMA16";
    case ATA_UDMA1: return "UDMA25";
    case ATA_UDMA2: return "UDMA33";
    case ATA_UDMA3: return "UDMA40";
    case ATA_UDMA4: return "UDMA66";
    case ATA_UDMA5: return "UDMA100";
    case ATA_UDMA6: return "UDMA133";
    case ATA_SA150: return "SATA150";
    case ATA_SA300: return "SATA300";
    case ATA_USB: return "USB";
    case ATA_USB1: return "USB1";
    case ATA_USB2: return "USB2";
    default:
        if (mode & ATA_DMA_MASK)
            return "BIOSDMA";
        else
            return "BIOSPIO";
    }
}

int
ata_pmode(struct ata_params *ap)
{
    if (ap->atavalid & ATA_FLAG_64_70) {
        if (ap->apiomodes & 0x02)
            return ATA_PIO4;
        if (ap->apiomodes & 0x01)
            return ATA_PIO3;
    }
    if (ap->mwdmamodes & 0x04)
        return ATA_PIO4;
    if (ap->mwdmamodes & 0x02)
        return ATA_PIO3;
    if (ap->mwdmamodes & 0x01)
        return ATA_PIO2;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200)
        return ATA_PIO2;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100)
        return ATA_PIO1;
    if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000)
        return ATA_PIO0;
    return ATA_PIO0;
}

int
ata_wmode(struct ata_params *ap)
{
    if (ap->mwdmamodes & 0x04)
        return ATA_WDMA2;
    if (ap->mwdmamodes & 0x02)
        return ATA_WDMA1;
    if (ap->mwdmamodes & 0x01)
        return ATA_WDMA0;
    return -1;
}

int
ata_umode(struct ata_params *ap)
{
    if (ap->atavalid & ATA_FLAG_88) {
        if (ap->udmamodes & 0x40)
            return ATA_UDMA6;
        if (ap->udmamodes & 0x20)
            return ATA_UDMA5;
        if (ap->udmamodes & 0x10)
            return ATA_UDMA4;
        if (ap->udmamodes & 0x08)
            return ATA_UDMA3;
        if (ap->udmamodes & 0x04)
            return ATA_UDMA2;
        if (ap->udmamodes & 0x02)
            return ATA_UDMA1;
        if (ap->udmamodes & 0x01)
            return ATA_UDMA0;
    }
    return -1;
}

int
ata_limit_mode(device_t dev, int mode, int maxmode)
{
    struct ata_device *atadev = device_get_softc(dev);

    if (maxmode && mode > maxmode)
        mode = maxmode;

    if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0)
        return min(mode, ata_umode(&atadev->param));

    if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0)
        return min(mode, ata_wmode(&atadev->param));

    if (mode > ata_pmode(&atadev->param))
        return min(mode, ata_pmode(&atadev->param));

    return mode;
}

static void
bswap(int8_t *buf, int len)
{
    u_int16_t *ptr = (u_int16_t*)(buf + len);

    while (--ptr >= (u_int16_t*)buf)
        *ptr = ntohs(*ptr);
}

static void
btrim(int8_t *buf, int len)
{
    int8_t *ptr;

    for (ptr = buf; ptr < buf+len; ++ptr)
        if (!*ptr || *ptr == '_')
            *ptr = ' ';
    for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr)
        *ptr = 0;
}

static void
bpack(int8_t *src, int8_t *dst, int len)
{
    int i, j, blank;

    for (i = j = blank = 0 ; i < len; i++) {
        if (blank && src[i] == ' ') continue;
        if (blank && src[i] != ' ') {
            dst[j++] = src[i];
            blank = 0;
            continue;
        }
        if (src[i] == ' ') {
            blank = 1;
            if (i == 0)
                continue;
        }
        dst[j++] = src[i];
    }
    if (j < len)
        dst[j] = 0x00;
}


/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{
    static struct cdev *atacdev;

    switch (what) {
    case MOD_LOAD:
        /* register controlling device */
        atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");

        if (cold) {
            /* register boot attach to be run when interrupts are enabled */
            if (!(ata_delayed_attach = (struct intr_config_hook *)
                                       malloc(sizeof(struct intr_config_hook),
                                              M_TEMP, M_NOWAIT | M_ZERO))) {
                printf("ata: malloc of delayed attach hook failed\n");
                return EIO;
            }
            ata_delayed_attach->ich_func = (void*)ata_boot_attach;
            if (config_intrhook_establish(ata_delayed_attach) != 0) {
                printf("ata: config_intrhook_establish failed\n");
                free(ata_delayed_attach, M_TEMP);
            }
        }
        return 0;

    case MOD_UNLOAD:
        /* deregister controlling device */
        destroy_dev(atacdev);
        return 0;

    default:
        return EOPNOTSUPP;
    }
}

static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(ata, 1);

static void
ata_init(void)
{
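    /* create the UMA zones used for ata_request and ata_composite allocations */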
    ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
                                   NULL, NULL, NULL, NULL, 0, 0);
    ata_composite_zone = uma_zcreate("ata_composite",
                                     sizeof(struct ata_composite),
                                     NULL, NULL, NULL, NULL, 0, 0);
}
SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);

static void
ata_uninit(void)
{
    uma_zdestroy(ata_composite_zone);
    uma_zdestroy(ata_request_zone);
}
SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);