1 /*- 2 * Copyright (c) 1998 - 2007 S�ren Schmidt <sos@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include "opt_ata.h" 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/ata.h> 34 #include <sys/kernel.h> 35 #include <sys/module.h> 36 #include <sys/endian.h> 37 #include <sys/ctype.h> 38 #include <sys/conf.h> 39 #include <sys/bus.h> 40 #include <sys/bio.h> 41 #include <sys/malloc.h> 42 #include <sys/sysctl.h> 43 #include <sys/sema.h> 44 #include <sys/taskqueue.h> 45 #include <vm/uma.h> 46 #include <machine/stdarg.h> 47 #include <machine/resource.h> 48 #include <machine/bus.h> 49 #include <sys/rman.h> 50 #include <dev/ata/ata-all.h> 51 #include <ata_if.h> 52 53 /* device structure */ 54 static d_ioctl_t ata_ioctl; 55 static struct cdevsw ata_cdevsw = { 56 .d_version = D_VERSION, 57 .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */ 58 .d_ioctl = ata_ioctl, 59 .d_name = "ata", 60 }; 61 62 /* prototypes */ 63 static void ata_boot_attach(void); 64 static device_t ata_add_child(device_t, struct ata_device *, int); 65 static int ata_getparam(struct ata_device *, int); 66 static void bswap(int8_t *, int); 67 static void btrim(int8_t *, int); 68 static void bpack(int8_t *, int8_t *, int); 69 70 /* global vars */ 71 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer"); 72 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL; 73 struct intr_config_hook *ata_delayed_attach = NULL; 74 devclass_t ata_devclass; 75 uma_zone_t ata_request_zone; 76 uma_zone_t ata_composite_zone; 77 int ata_wc = 1; 78 79 /* local vars */ 80 static int ata_dma = 1; 81 static int atapi_dma = 1; 82 83 /* sysctl vars */ 84 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters"); 85 TUNABLE_INT("hw.ata.ata_dma", &ata_dma); 86 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0, 87 "ATA disk DMA mode control"); 88 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma); 89 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0, 90 "ATAPI device DMA mode control"); 91 TUNABLE_INT("hw.ata.wc", &ata_wc); 92 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0, 93 "ATA disk write caching"); 94 95 /* 96 * newbus device interface related functions 97 */ 98 int 99 ata_probe(device_t dev) 100 { 101 return 0; 102 } 103 104 int 105 ata_attach(device_t dev) 106 { 107 struct ata_channel *ch = device_get_softc(dev); 108 int error, rid; 109 110 /* check that we have a virgin channel to attach */ 111 if (ch->r_irq) 112 return EEXIST; 113 114 /* initialize the softc basics */ 115 ch->dev = dev; 116 ch->state = ATA_IDLE; 117 bzero(&ch->state_mtx, sizeof(struct mtx)); 118 mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF); 119 bzero(&ch->queue_mtx, sizeof(struct mtx)); 120 mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF); 121 TAILQ_INIT(&ch->ata_queue); 122 123 /* reset the controller HW, the channel and device(s) */ 124 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit) 125 pause("ataatch", 1); 126 ATA_RESET(dev); 127 ATA_LOCKING(dev, ATA_LF_UNLOCK); 128 129 /* setup interrupt delivery */ 130 rid = ATA_IRQ_RID; 131 ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 132 RF_SHAREABLE | RF_ACTIVE); 133 if (!ch->r_irq) { 134 device_printf(dev, "unable to allocate interrupt\n"); 135 return ENXIO; 136 } 137 if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, 138 (driver_intr_t *)ata_interrupt, ch, &ch->ih))) { 139 device_printf(dev, "unable to setup interrupt\n"); 140 return error; 141 } 142 143 /* probe and attach devices on this channel unless we are in early boot */ 144 if (!ata_delayed_attach) 145 ata_identify(dev); 146 return 0; 147 } 148 149 int 150 ata_detach(device_t dev) 151 { 152 struct ata_channel *ch = device_get_softc(dev); 153 device_t *children; 154 int nchildren, i; 155 156 /* check that we have a valid channel to detach */ 157 if (!ch->r_irq) 158 return ENXIO; 159 160 /* grap the channel lock so no new requests gets launched */ 161 mtx_lock(&ch->state_mtx); 162 ch->state |= ATA_STALL_QUEUE; 163 mtx_unlock(&ch->state_mtx); 164 165 /* detach & delete all children */ 166 if (!device_get_children(dev, &children, &nchildren)) { 167 for (i = 0; i < nchildren; i++) 168 if (children[i]) 169 device_delete_child(dev, children[i]); 170 free(children, M_TEMP); 171 } 172 173 /* release resources */ 174 bus_teardown_intr(dev, ch->r_irq, ch->ih); 175 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 176 ch->r_irq = NULL; 177 mtx_destroy(&ch->state_mtx); 178 mtx_destroy(&ch->queue_mtx); 179 return 0; 180 } 181 182 int 183 ata_reinit(device_t dev) 184 { 185 struct ata_channel *ch = device_get_softc(dev); 186 struct ata_request *request; 187 device_t *children; 188 int nchildren, i; 189 190 /* check that we have a valid channel to reinit */ 191 if (!ch || !ch->r_irq) 192 return ENXIO; 193 194 if (bootverbose) 195 device_printf(dev, "reiniting channel ..\n"); 196 197 /* poll for locking the channel */ 198 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit) 199 pause("atarini", 1); 200 201 /* catch eventual request in ch->running */ 202 mtx_lock(&ch->state_mtx); 203 if ((request = ch->running)) 204 callout_stop(&request->callout); 205 ch->running = NULL; 206 207 /* unconditionally grap the channel lock */ 208 ch->state |= ATA_STALL_QUEUE; 209 mtx_unlock(&ch->state_mtx); 210 211 /* reset the controller HW, the channel and device(s) */ 212 ATA_RESET(dev); 213 214 /* reinit the children and delete any that fails */ 215 if (!device_get_children(dev, &children, &nchildren)) { 216 mtx_lock(&Giant); /* newbus suckage it needs Giant */ 217 for (i = 0; i < nchildren; i++) { 218 /* did any children go missing ? */ 219 if (children[i] && device_is_attached(children[i]) && 220 ATA_REINIT(children[i])) { 221 /* 222 * if we had a running request and its device matches 223 * this child we need to inform the request that the 224 * device is gone. 225 */ 226 if (request && request->dev == children[i]) { 227 request->result = ENXIO; 228 device_printf(request->dev, "FAILURE - device detached\n"); 229 230 /* if not timeout finish request here */ 231 if (!(request->flags & ATA_R_TIMEOUT)) 232 ata_finish(request); 233 request = NULL; 234 } 235 device_delete_child(dev, children[i]); 236 } 237 } 238 free(children, M_TEMP); 239 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */ 240 } 241 242 /* if we still have a good request put it on the queue again */ 243 if (request && !(request->flags & ATA_R_TIMEOUT)) { 244 device_printf(request->dev, 245 "WARNING - %s requeued due to channel reset", 246 ata_cmd2str(request)); 247 if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL))) 248 printf(" LBA=%ju", request->u.ata.lba); 249 printf("\n"); 250 request->flags |= ATA_R_REQUEUE; 251 ata_queue_request(request); 252 } 253 254 /* we're done release the channel for new work */ 255 mtx_lock(&ch->state_mtx); 256 ch->state = ATA_IDLE; 257 mtx_unlock(&ch->state_mtx); 258 ATA_LOCKING(dev, ATA_LF_UNLOCK); 259 260 if (bootverbose) 261 device_printf(dev, "reinit done ..\n"); 262 263 /* kick off requests on the queue */ 264 ata_start(dev); 265 return 0; 266 } 267 268 int 269 ata_suspend(device_t dev) 270 { 271 struct ata_channel *ch; 272 273 /* check for valid device */ 274 if (!dev || !(ch = device_get_softc(dev))) 275 return ENXIO; 276 277 /* wait for the channel to be IDLE or detached before suspending */ 278 while (ch->r_irq) { 279 mtx_lock(&ch->state_mtx); 280 if (ch->state == ATA_IDLE) { 281 ch->state = ATA_ACTIVE; 282 mtx_unlock(&ch->state_mtx); 283 break; 284 } 285 mtx_unlock(&ch->state_mtx); 286 tsleep(ch, PRIBIO, "atasusp", hz/10); 287 } 288 ATA_LOCKING(dev, ATA_LF_UNLOCK); 289 return 0; 290 } 291 292 int 293 ata_resume(device_t dev) 294 { 295 struct ata_channel *ch; 296 int error; 297 298 /* check for valid device */ 299 if (!dev || !(ch = device_get_softc(dev))) 300 return ENXIO; 301 302 /* reinit the devices, we dont know what mode/state they are in */ 303 error = ata_reinit(dev); 304 305 /* kick off requests on the queue */ 306 ata_start(dev); 307 return error; 308 } 309 310 int 311 ata_interrupt(void *data) 312 { 313 struct ata_channel *ch = (struct ata_channel *)data; 314 struct ata_request *request; 315 316 mtx_lock(&ch->state_mtx); 317 do { 318 /* ignore interrupt if its not for us */ 319 if (ch->hw.status && !ch->hw.status(ch->dev)) 320 break; 321 322 /* do we have a running request */ 323 if (!(request = ch->running)) 324 break; 325 326 ATA_DEBUG_RQ(request, "interrupt"); 327 328 /* safetycheck for the right state */ 329 if (ch->state == ATA_IDLE) { 330 device_printf(request->dev, "interrupt on idle channel ignored\n"); 331 break; 332 } 333 334 /* 335 * we have the HW locks, so end the transaction for this request 336 * if it finishes immediately otherwise wait for next interrupt 337 */ 338 if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) { 339 ch->running = NULL; 340 if (ch->state == ATA_ACTIVE) 341 ch->state = ATA_IDLE; 342 mtx_unlock(&ch->state_mtx); 343 ATA_LOCKING(ch->dev, ATA_LF_UNLOCK); 344 ata_finish(request); 345 return 1; 346 } 347 } while (0); 348 mtx_unlock(&ch->state_mtx); 349 return 0; 350 } 351 352 /* 353 * device related interfaces 354 */ 355 static int 356 ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data, 357 int32_t flag, struct thread *td) 358 { 359 device_t device, *children; 360 struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data; 361 int *value = (int *)data; 362 int i, nchildren, error = ENOTTY; 363 364 switch (cmd) { 365 case IOCATAGMAXCHANNEL: 366 *value = devclass_get_maxunit(ata_devclass); 367 error = 0; 368 break; 369 370 case IOCATAREINIT: 371 if (*value > devclass_get_maxunit(ata_devclass) || 372 !(device = devclass_get_device(ata_devclass, *value))) 373 return ENXIO; 374 error = ata_reinit(device); 375 ata_start(device); 376 break; 377 378 case IOCATAATTACH: 379 if (*value > devclass_get_maxunit(ata_devclass) || 380 !(device = devclass_get_device(ata_devclass, *value))) 381 return ENXIO; 382 /* XXX SOS should enable channel HW on controller */ 383 error = ata_attach(device); 384 break; 385 386 case IOCATADETACH: 387 if (*value > devclass_get_maxunit(ata_devclass) || 388 !(device = devclass_get_device(ata_devclass, *value))) 389 return ENXIO; 390 error = ata_detach(device); 391 /* XXX SOS should disable channel HW on controller */ 392 break; 393 394 case IOCATADEVICES: 395 if (devices->channel > devclass_get_maxunit(ata_devclass) || 396 !(device = devclass_get_device(ata_devclass, devices->channel))) 397 return ENXIO; 398 bzero(devices->name[0], 32); 399 bzero(&devices->params[0], sizeof(struct ata_params)); 400 bzero(devices->name[1], 32); 401 bzero(&devices->params[1], sizeof(struct ata_params)); 402 if (!device_get_children(device, &children, &nchildren)) { 403 for (i = 0; i < nchildren; i++) { 404 if (children[i] && device_is_attached(children[i])) { 405 struct ata_device *atadev = device_get_softc(children[i]); 406 407 if (atadev->unit == ATA_MASTER) { 408 strncpy(devices->name[0], 409 device_get_nameunit(children[i]), 32); 410 bcopy(&atadev->param, &devices->params[0], 411 sizeof(struct ata_params)); 412 } 413 if (atadev->unit == ATA_SLAVE) { 414 strncpy(devices->name[1], 415 device_get_nameunit(children[i]), 32); 416 bcopy(&atadev->param, &devices->params[1], 417 sizeof(struct ata_params)); 418 } 419 } 420 } 421 free(children, M_TEMP); 422 error = 0; 423 } 424 else 425 error = ENODEV; 426 break; 427 428 default: 429 if (ata_raid_ioctl_func) 430 error = ata_raid_ioctl_func(cmd, data); 431 } 432 return error; 433 } 434 435 int 436 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data) 437 { 438 struct ata_device *atadev = device_get_softc(dev); 439 struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data; 440 struct ata_params *params = (struct ata_params *)data; 441 int *mode = (int *)data; 442 struct ata_request *request; 443 caddr_t buf; 444 int error; 445 446 switch (cmd) { 447 case IOCATAREQUEST: 448 if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) { 449 return ENOMEM; 450 } 451 if (!(request = ata_alloc_request())) { 452 free(buf, M_ATA); 453 return ENOMEM; 454 } 455 if (ioc_request->flags & ATA_CMD_WRITE) { 456 error = copyin(ioc_request->data, buf, ioc_request->count); 457 if (error) { 458 free(buf, M_ATA); 459 ata_free_request(request); 460 return error; 461 } 462 } 463 request->dev = dev; 464 if (ioc_request->flags & ATA_CMD_ATAPI) { 465 request->flags = ATA_R_ATAPI; 466 bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16); 467 } 468 else { 469 request->u.ata.command = ioc_request->u.ata.command; 470 request->u.ata.feature = ioc_request->u.ata.feature; 471 request->u.ata.lba = ioc_request->u.ata.lba; 472 request->u.ata.count = ioc_request->u.ata.count; 473 } 474 request->timeout = ioc_request->timeout; 475 request->data = buf; 476 request->bytecount = ioc_request->count; 477 request->transfersize = request->bytecount; 478 if (ioc_request->flags & ATA_CMD_CONTROL) 479 request->flags |= ATA_R_CONTROL; 480 if (ioc_request->flags & ATA_CMD_READ) 481 request->flags |= ATA_R_READ; 482 if (ioc_request->flags & ATA_CMD_WRITE) 483 request->flags |= ATA_R_WRITE; 484 ata_queue_request(request); 485 if (request->flags & ATA_R_ATAPI) { 486 bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense, 487 sizeof(struct atapi_sense)); 488 } 489 else { 490 ioc_request->u.ata.command = request->u.ata.command; 491 ioc_request->u.ata.feature = request->u.ata.feature; 492 ioc_request->u.ata.lba = request->u.ata.lba; 493 ioc_request->u.ata.count = request->u.ata.count; 494 } 495 ioc_request->error = request->result; 496 if (ioc_request->flags & ATA_CMD_READ) 497 error = copyout(buf, ioc_request->data, ioc_request->count); 498 else 499 error = 0; 500 free(buf, M_ATA); 501 ata_free_request(request); 502 return error; 503 504 case IOCATAGPARM: 505 ata_getparam(atadev, 0); 506 bcopy(&atadev->param, params, sizeof(struct ata_params)); 507 return 0; 508 509 case IOCATASMODE: 510 atadev->mode = *mode; 511 ATA_SETMODE(device_get_parent(dev), dev); 512 return 0; 513 514 case IOCATAGMODE: 515 *mode = atadev->mode; 516 return 0; 517 case IOCATASSPINDOWN: 518 atadev->spindown = *mode; 519 return 0; 520 case IOCATAGSPINDOWN: 521 *mode = atadev->spindown; 522 return 0; 523 default: 524 return ENOTTY; 525 } 526 } 527 528 static void 529 ata_boot_attach(void) 530 { 531 struct ata_channel *ch; 532 int ctlr; 533 534 mtx_lock(&Giant); /* newbus suckage it needs Giant */ 535 536 /* kick of probe and attach on all channels */ 537 for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) { 538 if ((ch = devclass_get_softc(ata_devclass, ctlr))) { 539 ata_identify(ch->dev); 540 } 541 } 542 543 /* release the hook that got us here, we are only needed once during boot */ 544 if (ata_delayed_attach) { 545 config_intrhook_disestablish(ata_delayed_attach); 546 free(ata_delayed_attach, M_TEMP); 547 ata_delayed_attach = NULL; 548 } 549 550 mtx_unlock(&Giant); /* newbus suckage dealt with, release Giant */ 551 } 552 553 554 /* 555 * misc support functions 556 */ 557 static device_t 558 ata_add_child(device_t parent, struct ata_device *atadev, int unit) 559 { 560 device_t child; 561 562 if ((child = device_add_child(parent, NULL, unit))) { 563 device_set_softc(child, atadev); 564 device_quiet(child); 565 atadev->dev = child; 566 atadev->max_iosize = DEV_BSIZE; 567 atadev->mode = ATA_PIO_MAX; 568 } 569 return child; 570 } 571 572 static int 573 ata_getparam(struct ata_device *atadev, int init) 574 { 575 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev)); 576 struct ata_request *request; 577 u_int8_t command = 0; 578 int error = ENOMEM, retries = 2; 579 580 if (ch->devices & 581 (atadev->unit == ATA_MASTER ? ATA_ATA_MASTER : ATA_ATA_SLAVE)) 582 command = ATA_ATA_IDENTIFY; 583 if (ch->devices & 584 (atadev->unit == ATA_MASTER ? ATA_ATAPI_MASTER : ATA_ATAPI_SLAVE)) 585 command = ATA_ATAPI_IDENTIFY; 586 if (!command) 587 return ENXIO; 588 589 while (retries-- > 0 && error) { 590 if (!(request = ata_alloc_request())) 591 break; 592 request->dev = atadev->dev; 593 request->timeout = 1; 594 request->retries = 0; 595 request->u.ata.command = command; 596 request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT|ATA_R_QUIET); 597 request->data = (void *)&atadev->param; 598 request->bytecount = sizeof(struct ata_params); 599 request->donecount = 0; 600 request->transfersize = DEV_BSIZE; 601 ata_queue_request(request); 602 error = request->result; 603 ata_free_request(request); 604 } 605 606 if (!error && (isprint(atadev->param.model[0]) || 607 isprint(atadev->param.model[1]))) { 608 struct ata_params *atacap = &atadev->param; 609 char buffer[64]; 610 int16_t *ptr; 611 612 for (ptr = (int16_t *)atacap; 613 ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) { 614 *ptr = le16toh(*ptr); 615 } 616 if (!(!strncmp(atacap->model, "FX", 2) || 617 !strncmp(atacap->model, "NEC", 3) || 618 !strncmp(atacap->model, "Pioneer", 7) || 619 !strncmp(atacap->model, "SHARP", 5))) { 620 bswap(atacap->model, sizeof(atacap->model)); 621 bswap(atacap->revision, sizeof(atacap->revision)); 622 bswap(atacap->serial, sizeof(atacap->serial)); 623 } 624 btrim(atacap->model, sizeof(atacap->model)); 625 bpack(atacap->model, atacap->model, sizeof(atacap->model)); 626 btrim(atacap->revision, sizeof(atacap->revision)); 627 bpack(atacap->revision, atacap->revision, sizeof(atacap->revision)); 628 btrim(atacap->serial, sizeof(atacap->serial)); 629 bpack(atacap->serial, atacap->serial, sizeof(atacap->serial)); 630 631 if (bootverbose) 632 printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n", 633 device_get_unit(ch->dev), 634 atadev->unit == ATA_MASTER ? "master" : "slave", 635 ata_mode2str(ata_pmode(atacap)), 636 ata_mode2str(ata_wmode(atacap)), 637 ata_mode2str(ata_umode(atacap)), 638 (atacap->hwres & ATA_CABLE_ID) ? "80":"40"); 639 640 if (init) { 641 sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision); 642 device_set_desc_copy(atadev->dev, buffer); 643 if ((atadev->param.config & ATA_PROTO_ATAPI) && 644 (atadev->param.config != ATA_CFA_MAGIC1) && 645 (atadev->param.config != ATA_CFA_MAGIC2)) { 646 if (atapi_dma && ch->dma && 647 (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR && 648 ata_umode(&atadev->param) >= ATA_UDMA2) 649 atadev->mode = ATA_DMA_MAX; 650 } 651 else { 652 if (ata_dma && ch->dma && 653 (ata_umode(&atadev->param) > 0 || 654 ata_wmode(&atadev->param) > 0)) 655 atadev->mode = ATA_DMA_MAX; 656 } 657 } 658 } 659 else { 660 if (!error) 661 error = ENXIO; 662 } 663 return error; 664 } 665 666 int 667 ata_identify(device_t dev) 668 { 669 struct ata_channel *ch = device_get_softc(dev); 670 struct ata_device *master = NULL, *slave = NULL; 671 device_t master_child = NULL, slave_child = NULL; 672 int master_unit = -1, slave_unit = -1; 673 674 if (ch->devices & (ATA_ATA_MASTER | ATA_ATAPI_MASTER)) { 675 if (!(master = malloc(sizeof(struct ata_device), 676 M_ATA, M_NOWAIT | M_ZERO))) { 677 device_printf(dev, "out of memory\n"); 678 return ENOMEM; 679 } 680 master->unit = ATA_MASTER; 681 } 682 if (ch->devices & (ATA_ATA_SLAVE | ATA_ATAPI_SLAVE)) { 683 if (!(slave = malloc(sizeof(struct ata_device), 684 M_ATA, M_NOWAIT | M_ZERO))) { 685 free(master, M_ATA); 686 device_printf(dev, "out of memory\n"); 687 return ENOMEM; 688 } 689 slave->unit = ATA_SLAVE; 690 } 691 692 #ifdef ATA_STATIC_ID 693 if (ch->devices & ATA_ATA_MASTER) 694 master_unit = (device_get_unit(dev) << 1); 695 #endif 696 if (master && !(master_child = ata_add_child(dev, master, master_unit))) { 697 free(master, M_ATA); 698 master = NULL; 699 } 700 #ifdef ATA_STATIC_ID 701 if (ch->devices & ATA_ATA_SLAVE) 702 slave_unit = (device_get_unit(dev) << 1) + 1; 703 #endif 704 if (slave && !(slave_child = ata_add_child(dev, slave, slave_unit))) { 705 free(slave, M_ATA); 706 slave = NULL; 707 } 708 709 if (slave && ata_getparam(slave, 1)) { 710 device_delete_child(dev, slave_child); 711 free(slave, M_ATA); 712 } 713 if (master && ata_getparam(master, 1)) { 714 device_delete_child(dev, master_child); 715 free(master, M_ATA); 716 } 717 718 bus_generic_probe(dev); 719 bus_generic_attach(dev); 720 return 0; 721 } 722 723 void 724 ata_default_registers(device_t dev) 725 { 726 struct ata_channel *ch = device_get_softc(dev); 727 728 /* fill in the defaults from whats setup already */ 729 ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res; 730 ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset; 731 ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res; 732 ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset; 733 ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res; 734 ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset; 735 ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res; 736 ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset; 737 } 738 739 void 740 ata_modify_if_48bit(struct ata_request *request) 741 { 742 struct ata_channel *ch = device_get_softc(device_get_parent(request->dev)); 743 struct ata_device *atadev = device_get_softc(request->dev); 744 745 atadev->flags &= ~ATA_D_48BIT_ACTIVE; 746 747 if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA || 748 request->u.ata.count > 256) && 749 atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) { 750 751 /* translate command into 48bit version */ 752 switch (request->u.ata.command) { 753 case ATA_READ: 754 request->u.ata.command = ATA_READ48; 755 break; 756 case ATA_READ_MUL: 757 request->u.ata.command = ATA_READ_MUL48; 758 break; 759 case ATA_READ_DMA: 760 if (ch->flags & ATA_NO_48BIT_DMA) { 761 if (request->transfersize > DEV_BSIZE) 762 request->u.ata.command = ATA_READ_MUL48; 763 else 764 request->u.ata.command = ATA_READ48; 765 request->flags &= ~ATA_R_DMA; 766 } 767 else 768 request->u.ata.command = ATA_READ_DMA48; 769 break; 770 case ATA_READ_DMA_QUEUED: 771 if (ch->flags & ATA_NO_48BIT_DMA) { 772 if (request->transfersize > DEV_BSIZE) 773 request->u.ata.command = ATA_READ_MUL48; 774 else 775 request->u.ata.command = ATA_READ48; 776 request->flags &= ~ATA_R_DMA; 777 } 778 else 779 request->u.ata.command = ATA_READ_DMA_QUEUED48; 780 break; 781 case ATA_WRITE: 782 request->u.ata.command = ATA_WRITE48; 783 break; 784 case ATA_WRITE_MUL: 785 request->u.ata.command = ATA_WRITE_MUL48; 786 break; 787 case ATA_WRITE_DMA: 788 if (ch->flags & ATA_NO_48BIT_DMA) { 789 if (request->transfersize > DEV_BSIZE) 790 request->u.ata.command = ATA_WRITE_MUL48; 791 else 792 request->u.ata.command = ATA_WRITE48; 793 request->flags &= ~ATA_R_DMA; 794 } 795 else 796 request->u.ata.command = ATA_WRITE_DMA48; 797 break; 798 case ATA_WRITE_DMA_QUEUED: 799 if (ch->flags & ATA_NO_48BIT_DMA) { 800 if (request->transfersize > DEV_BSIZE) 801 request->u.ata.command = ATA_WRITE_MUL48; 802 else 803 request->u.ata.command = ATA_WRITE48; 804 request->u.ata.command = ATA_WRITE48; 805 request->flags &= ~ATA_R_DMA; 806 } 807 else 808 request->u.ata.command = ATA_WRITE_DMA_QUEUED48; 809 break; 810 case ATA_FLUSHCACHE: 811 request->u.ata.command = ATA_FLUSHCACHE48; 812 break; 813 case ATA_READ_NATIVE_MAX_ADDDRESS: 814 request->u.ata.command = ATA_READ_NATIVE_MAX_ADDDRESS48; 815 break; 816 case ATA_SET_MAX_ADDRESS: 817 request->u.ata.command = ATA_SET_MAX_ADDRESS48; 818 break; 819 default: 820 return; 821 } 822 atadev->flags |= ATA_D_48BIT_ACTIVE; 823 } 824 } 825 826 void 827 ata_udelay(int interval) 828 { 829 /* for now just use DELAY, the timer/sleep subsytems are not there yet */ 830 if (1 || interval < (1000000/hz) || ata_delayed_attach) 831 DELAY(interval); 832 else 833 pause("ataslp", interval/(1000000/hz)); 834 } 835 836 char * 837 ata_mode2str(int mode) 838 { 839 switch (mode) { 840 case -1: return "UNSUPPORTED"; 841 case ATA_PIO0: return "PIO0"; 842 case ATA_PIO1: return "PIO1"; 843 case ATA_PIO2: return "PIO2"; 844 case ATA_PIO3: return "PIO3"; 845 case ATA_PIO4: return "PIO4"; 846 case ATA_WDMA0: return "WDMA0"; 847 case ATA_WDMA1: return "WDMA1"; 848 case ATA_WDMA2: return "WDMA2"; 849 case ATA_UDMA0: return "UDMA16"; 850 case ATA_UDMA1: return "UDMA25"; 851 case ATA_UDMA2: return "UDMA33"; 852 case ATA_UDMA3: return "UDMA40"; 853 case ATA_UDMA4: return "UDMA66"; 854 case ATA_UDMA5: return "UDMA100"; 855 case ATA_UDMA6: return "UDMA133"; 856 case ATA_SA150: return "SATA150"; 857 case ATA_SA300: return "SATA300"; 858 case ATA_USB: return "USB"; 859 case ATA_USB1: return "USB1"; 860 case ATA_USB2: return "USB2"; 861 default: 862 if (mode & ATA_DMA_MASK) 863 return "BIOSDMA"; 864 else 865 return "BIOSPIO"; 866 } 867 } 868 869 int 870 ata_pmode(struct ata_params *ap) 871 { 872 if (ap->atavalid & ATA_FLAG_64_70) { 873 if (ap->apiomodes & 0x02) 874 return ATA_PIO4; 875 if (ap->apiomodes & 0x01) 876 return ATA_PIO3; 877 } 878 if (ap->mwdmamodes & 0x04) 879 return ATA_PIO4; 880 if (ap->mwdmamodes & 0x02) 881 return ATA_PIO3; 882 if (ap->mwdmamodes & 0x01) 883 return ATA_PIO2; 884 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200) 885 return ATA_PIO2; 886 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100) 887 return ATA_PIO1; 888 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000) 889 return ATA_PIO0; 890 return ATA_PIO0; 891 } 892 893 int 894 ata_wmode(struct ata_params *ap) 895 { 896 if (ap->mwdmamodes & 0x04) 897 return ATA_WDMA2; 898 if (ap->mwdmamodes & 0x02) 899 return ATA_WDMA1; 900 if (ap->mwdmamodes & 0x01) 901 return ATA_WDMA0; 902 return -1; 903 } 904 905 int 906 ata_umode(struct ata_params *ap) 907 { 908 if (ap->atavalid & ATA_FLAG_88) { 909 if (ap->udmamodes & 0x40) 910 return ATA_UDMA6; 911 if (ap->udmamodes & 0x20) 912 return ATA_UDMA5; 913 if (ap->udmamodes & 0x10) 914 return ATA_UDMA4; 915 if (ap->udmamodes & 0x08) 916 return ATA_UDMA3; 917 if (ap->udmamodes & 0x04) 918 return ATA_UDMA2; 919 if (ap->udmamodes & 0x02) 920 return ATA_UDMA1; 921 if (ap->udmamodes & 0x01) 922 return ATA_UDMA0; 923 } 924 return -1; 925 } 926 927 int 928 ata_limit_mode(device_t dev, int mode, int maxmode) 929 { 930 struct ata_device *atadev = device_get_softc(dev); 931 932 if (maxmode && mode > maxmode) 933 mode = maxmode; 934 935 if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0) 936 return min(mode, ata_umode(&atadev->param)); 937 938 if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0) 939 return min(mode, ata_wmode(&atadev->param)); 940 941 if (mode > ata_pmode(&atadev->param)) 942 return min(mode, ata_pmode(&atadev->param)); 943 944 return mode; 945 } 946 947 static void 948 bswap(int8_t *buf, int len) 949 { 950 u_int16_t *ptr = (u_int16_t*)(buf + len); 951 952 while (--ptr >= (u_int16_t*)buf) 953 *ptr = ntohs(*ptr); 954 } 955 956 static void 957 btrim(int8_t *buf, int len) 958 { 959 int8_t *ptr; 960 961 for (ptr = buf; ptr < buf+len; ++ptr) 962 if (!*ptr || *ptr == '_') 963 *ptr = ' '; 964 for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr) 965 *ptr = 0; 966 } 967 968 static void 969 bpack(int8_t *src, int8_t *dst, int len) 970 { 971 int i, j, blank; 972 973 for (i = j = blank = 0 ; i < len; i++) { 974 if (blank && src[i] == ' ') continue; 975 if (blank && src[i] != ' ') { 976 dst[j++] = src[i]; 977 blank = 0; 978 continue; 979 } 980 if (src[i] == ' ') { 981 blank = 1; 982 if (i == 0) 983 continue; 984 } 985 dst[j++] = src[i]; 986 } 987 if (j < len) 988 dst[j] = 0x00; 989 } 990 991 992 /* 993 * module handeling 994 */ 995 static int 996 ata_module_event_handler(module_t mod, int what, void *arg) 997 { 998 static struct cdev *atacdev; 999 1000 switch (what) { 1001 case MOD_LOAD: 1002 /* register controlling device */ 1003 atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata"); 1004 1005 if (cold) { 1006 /* register boot attach to be run when interrupts are enabled */ 1007 if (!(ata_delayed_attach = (struct intr_config_hook *) 1008 malloc(sizeof(struct intr_config_hook), 1009 M_TEMP, M_NOWAIT | M_ZERO))) { 1010 printf("ata: malloc of delayed attach hook failed\n"); 1011 return EIO; 1012 } 1013 ata_delayed_attach->ich_func = (void*)ata_boot_attach; 1014 if (config_intrhook_establish(ata_delayed_attach) != 0) { 1015 printf("ata: config_intrhook_establish failed\n"); 1016 free(ata_delayed_attach, M_TEMP); 1017 } 1018 } 1019 return 0; 1020 1021 case MOD_UNLOAD: 1022 /* deregister controlling device */ 1023 destroy_dev(atacdev); 1024 return 0; 1025 1026 default: 1027 return EOPNOTSUPP; 1028 } 1029 } 1030 1031 static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL }; 1032 DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND); 1033 MODULE_VERSION(ata, 1); 1034 1035 static void 1036 ata_init(void) 1037 { 1038 ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request), 1039 NULL, NULL, NULL, NULL, 0, 0); 1040 ata_composite_zone = uma_zcreate("ata_composite", 1041 sizeof(struct ata_composite), 1042 NULL, NULL, NULL, NULL, 0, 0); 1043 } 1044 SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL); 1045 1046 static void 1047 ata_uninit(void) 1048 { 1049 uma_zdestroy(ata_composite_zone); 1050 uma_zdestroy(ata_request_zone); 1051 } 1052 SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL); 1053