/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ata.h"
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/ctype.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>
#include <dev/pci/pcivar.h>
#include <ata_if.h>

#ifdef ATA_CAM
#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#endif

#ifndef ATA_CAM
/* device structure */
static d_ioctl_t ata_ioctl;
static struct cdevsw ata_cdevsw = {
    .d_version = D_VERSION,
    .d_flags = D_NEEDGIANT, /* we need this as newbus isn't mpsafe */
    .d_ioctl = ata_ioctl,
    .d_name = "ata",
};
#endif

/* prototypes */
#ifndef ATA_CAM
static void ata_boot_attach(void);
static device_t ata_add_child(device_t, struct ata_device *, int);
#else
static void ataaction(struct cam_sim *sim, union ccb *ccb);
static void atapoll(struct cam_sim *sim);
#endif
static void ata_conn_event(void *, int);
static void bswap(int8_t *, int);
static void btrim(int8_t *, int);
static void bpack(int8_t *, int8_t *, int);
static void ata_interrupt_locked(void *data);
static void ata_periodic_poll(void *data);

/* global vars */
MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
struct intr_config_hook *ata_delayed_attach = NULL;
devclass_t ata_devclass;
uma_zone_t ata_request_zone;
uma_zone_t ata_composite_zone;
int ata_wc = 1;
int ata_setmax = 0;
int ata_dma_check_80pin = 1;

/* local vars */
static int ata_dma = 1;
static int
atapi_dma = 1; 102 103 /* sysctl vars */ 104 SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters"); 105 TUNABLE_INT("hw.ata.ata_dma", &ata_dma); 106 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma, CTLFLAG_RDTUN, &ata_dma, 0, 107 "ATA disk DMA mode control"); 108 TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin); 109 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin, 110 CTLFLAG_RW, &ata_dma_check_80pin, 1, 111 "Check for 80pin cable before setting ATA DMA mode"); 112 TUNABLE_INT("hw.ata.atapi_dma", &atapi_dma); 113 SYSCTL_INT(_hw_ata, OID_AUTO, atapi_dma, CTLFLAG_RDTUN, &atapi_dma, 0, 114 "ATAPI device DMA mode control"); 115 TUNABLE_INT("hw.ata.wc", &ata_wc); 116 SYSCTL_INT(_hw_ata, OID_AUTO, wc, CTLFLAG_RDTUN, &ata_wc, 0, 117 "ATA disk write caching"); 118 TUNABLE_INT("hw.ata.setmax", &ata_setmax); 119 SYSCTL_INT(_hw_ata, OID_AUTO, setmax, CTLFLAG_RDTUN, &ata_setmax, 0, 120 "ATA disk set max native address"); 121 122 /* 123 * newbus device interface related functions 124 */ 125 int 126 ata_probe(device_t dev) 127 { 128 return 0; 129 } 130 131 int 132 ata_attach(device_t dev) 133 { 134 struct ata_channel *ch = device_get_softc(dev); 135 int error, rid; 136 #ifdef ATA_CAM 137 struct cam_devq *devq; 138 const char *res; 139 char buf[64]; 140 int i, mode; 141 #endif 142 143 /* check that we have a virgin channel to attach */ 144 if (ch->r_irq) 145 return EEXIST; 146 147 /* initialize the softc basics */ 148 ch->dev = dev; 149 ch->state = ATA_IDLE; 150 bzero(&ch->state_mtx, sizeof(struct mtx)); 151 mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF); 152 bzero(&ch->queue_mtx, sizeof(struct mtx)); 153 mtx_init(&ch->queue_mtx, "ATA queue lock", NULL, MTX_DEF); 154 TAILQ_INIT(&ch->ata_queue); 155 TASK_INIT(&ch->conntask, 0, ata_conn_event, dev); 156 #ifdef ATA_CAM 157 for (i = 0; i < 16; i++) { 158 ch->user[i].mode = 0; 159 snprintf(buf, sizeof(buf), "dev%d.mode", i); 160 if (resource_string_value(device_get_name(dev), 161 device_get_unit(dev), buf, &res) == 0) 162 mode = ata_str2mode(res); 163 else if (resource_string_value(device_get_name(dev), 164 device_get_unit(dev), "mode", &res) == 0) 165 mode = ata_str2mode(res); 166 else 167 mode = -1; 168 if (mode >= 0) 169 ch->user[i].mode = mode; 170 if (ch->flags & ATA_SATA) 171 ch->user[i].bytecount = 8192; 172 else 173 ch->user[i].bytecount = MAXPHYS; 174 ch->user[i].caps = 0; 175 ch->curr[i] = ch->user[i]; 176 if (ch->pm_level > 0) 177 ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ; 178 if (ch->pm_level > 1) 179 ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ; 180 } 181 #endif 182 callout_init(&ch->poll_callout, 1); 183 184 /* reset the controller HW, the channel and device(s) */ 185 while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit) 186 pause("ataatch", 1); 187 #ifndef ATA_CAM 188 ATA_RESET(dev); 189 #endif 190 ATA_LOCKING(dev, ATA_LF_UNLOCK); 191 192 /* allocate DMA resources if DMA HW present*/ 193 if (ch->dma.alloc) 194 ch->dma.alloc(dev); 195 196 /* setup interrupt delivery */ 197 rid = ATA_IRQ_RID; 198 ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 199 RF_SHAREABLE | RF_ACTIVE); 200 if (!ch->r_irq) { 201 device_printf(dev, "unable to allocate interrupt\n"); 202 return ENXIO; 203 } 204 if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, 205 ata_interrupt, ch, &ch->ih))) { 206 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq); 207 device_printf(dev, "unable to setup interrupt\n"); 208 return error; 209 } 210 if (ch->flags & ATA_PERIODIC_POLL) 211 callout_reset(&ch->poll_callout, hz, 
	ata_periodic_poll, ch);

#ifndef ATA_CAM
    /* probe and attach devices on this channel unless we are in early boot */
    if (!ata_delayed_attach)
	ata_identify(dev);
    return (0);
#else
    mtx_lock(&ch->state_mtx);
    /* Create the device queue for our SIM. */
    devq = cam_simq_alloc(1);
    if (devq == NULL) {
	device_printf(dev, "Unable to allocate simq\n");
	error = ENOMEM;
	goto err1;
    }
    /* Construct SIM entry */
    ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
	device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
    if (ch->sim == NULL) {
	device_printf(dev, "unable to allocate sim\n");
	cam_simq_free(devq);
	error = ENOMEM;
	goto err1;
    }
    if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
	device_printf(dev, "unable to register xpt bus\n");
	error = ENXIO;
	goto err2;
    }
    if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
	CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
	device_printf(dev, "unable to create path\n");
	error = ENXIO;
	goto err3;
    }
    mtx_unlock(&ch->state_mtx);
    return (0);

err3:
    xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
    cam_sim_free(ch->sim, /*free_devq*/TRUE);
    ch->sim = NULL;
err1:
    bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
    mtx_unlock(&ch->state_mtx);
    if (ch->flags & ATA_PERIODIC_POLL)
	callout_drain(&ch->poll_callout);
    return (error);
#endif
}

int
ata_detach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
#ifndef ATA_CAM
    device_t *children;
    int nchildren, i;
#endif

    /* check that we have a valid channel to detach */
    if (!ch->r_irq)
	return ENXIO;

    /* grab the channel lock so no new requests get launched */
    mtx_lock(&ch->state_mtx);
    ch->state |= ATA_STALL_QUEUE;
    mtx_unlock(&ch->state_mtx);
    if (ch->flags & ATA_PERIODIC_POLL)
	callout_drain(&ch->poll_callout);

#ifndef ATA_CAM
    /* detach & delete all children */
    if (!device_get_children(dev, &children, &nchildren)) {
	for (i = 0; i < nchildren; i++)
	    if (children[i])
		device_delete_child(dev, children[i]);
	free(children, M_TEMP);
    }
#endif
    taskqueue_drain(taskqueue_thread, &ch->conntask);

#ifdef ATA_CAM
    mtx_lock(&ch->state_mtx);
    xpt_async(AC_LOST_DEVICE, ch->path, NULL);
    xpt_free_path(ch->path);
    xpt_bus_deregister(cam_sim_path(ch->sim));
    cam_sim_free(ch->sim, /*free_devq*/TRUE);
    ch->sim = NULL;
    mtx_unlock(&ch->state_mtx);
#endif

    /* release resources */
    bus_teardown_intr(dev, ch->r_irq, ch->ih);
    bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
    ch->r_irq = NULL;

    /* free DMA resources if DMA HW present */
    if (ch->dma.free)
	ch->dma.free(dev);

    mtx_destroy(&ch->state_mtx);
    mtx_destroy(&ch->queue_mtx);
    return 0;
}

static void
ata_conn_event(void *context, int dummy)
{
    device_t dev = (device_t)context;
#ifdef ATA_CAM
    struct ata_channel *ch = device_get_softc(dev);
    union ccb *ccb;

    mtx_lock(&ch->state_mtx);
    if (ch->sim == NULL) {
	mtx_unlock(&ch->state_mtx);
	return;
    }
    ata_reinit(dev);
    if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
	return;
    if (xpt_create_path(&ccb->ccb_h.path, NULL,
	cam_sim_path(ch->sim),
	CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
	xpt_free_ccb(ccb);
	return;
    }
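    /*
     * A connect/disconnect event triggers a full channel reinit above; the
     * freshly built wildcard path is then handed to the XPT so CAM rescans
     * the channel and picks up whatever device change caused the event.
     */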
    xpt_rescan(ccb);
    mtx_unlock(&ch->state_mtx);
#else
    ata_reinit(dev);
#endif
}

int
ata_reinit(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    struct ata_request *request;
#ifndef ATA_CAM
    device_t *children;
    int nchildren, i;

    /* check that we have a valid channel to reinit */
    if (!ch || !ch->r_irq)
	return ENXIO;

    if (bootverbose)
	device_printf(dev, "reiniting channel ..\n");

    /* poll for locking the channel */
    while (ATA_LOCKING(dev, ATA_LF_LOCK) != ch->unit)
	pause("atarini", 1);

    /* catch eventual request in ch->running */
    mtx_lock(&ch->state_mtx);
    if (ch->state & ATA_STALL_QUEUE) {
	/* Recursive reinits and reinits during detach prohibited. */
	mtx_unlock(&ch->state_mtx);
	return (ENXIO);
    }
    if ((request = ch->running))
	callout_stop(&request->callout);
    ch->running = NULL;

    /* unconditionally grab the channel lock */
    ch->state |= ATA_STALL_QUEUE;
    mtx_unlock(&ch->state_mtx);

    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);

    /* reinit the children and delete any that fail */
    if (!device_get_children(dev, &children, &nchildren)) {
	mtx_lock(&Giant);	/* newbus suckage it needs Giant */
	for (i = 0; i < nchildren; i++) {
	    /* did any children go missing ? */
	    if (children[i] && device_is_attached(children[i]) &&
		ATA_REINIT(children[i])) {
		/*
		 * if we had a running request and its device matches
		 * this child we need to inform the request that the
		 * device is gone.
		 */
		if (request && request->dev == children[i]) {
		    request->result = ENXIO;
		    device_printf(request->dev, "FAILURE - device detached\n");

		    /* if not timeout finish request here */
		    if (!(request->flags & ATA_R_TIMEOUT))
			ata_finish(request);
		    request = NULL;
		}
		device_delete_child(dev, children[i]);
	    }
	}
	free(children, M_TEMP);
	mtx_unlock(&Giant);	/* newbus suckage dealt with, release Giant */
    }

    /* if we still have a good request put it on the queue again */
    if (request && !(request->flags & ATA_R_TIMEOUT)) {
	device_printf(request->dev,
		      "WARNING - %s requeued due to channel reset",
		      ata_cmd2str(request));
	if (!(request->flags & (ATA_R_ATAPI | ATA_R_CONTROL)))
	    printf(" LBA=%ju", request->u.ata.lba);
	printf("\n");
	request->flags |= ATA_R_REQUEUE;
	ata_queue_request(request);
    }

    /* we're done, release the channel for new work */
    mtx_lock(&ch->state_mtx);
    ch->state = ATA_IDLE;
    mtx_unlock(&ch->state_mtx);
    ATA_LOCKING(dev, ATA_LF_UNLOCK);

    /* Add new children.
     */
    /* ata_identify(dev); */

    if (bootverbose)
	device_printf(dev, "reinit done ..\n");

    /* kick off requests on the queue */
    ata_start(dev);
#else
    xpt_freeze_simq(ch->sim, 1);
    if ((request = ch->running)) {
	ch->running = NULL;
	if (ch->state == ATA_ACTIVE)
	    ch->state = ATA_IDLE;
	callout_stop(&request->callout);
	if (ch->dma.unload)
	    ch->dma.unload(request);
	request->result = ERESTART;
	ata_cam_end_transaction(dev, request);
    }
    /* reset the controller HW, the channel and device(s) */
    ATA_RESET(dev);
    /* Tell the XPT about the event */
    xpt_async(AC_BUS_RESET, ch->path, NULL);
    xpt_release_simq(ch->sim, TRUE);
#endif
    return(0);
}

int
ata_suspend(device_t dev)
{
    struct ata_channel *ch;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
	return ENXIO;

    if (ch->flags & ATA_PERIODIC_POLL)
	callout_drain(&ch->poll_callout);
#ifdef ATA_CAM
    mtx_lock(&ch->state_mtx);
    xpt_freeze_simq(ch->sim, 1);
    while (ch->state != ATA_IDLE)
	msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
    mtx_unlock(&ch->state_mtx);
#else
    /* wait for the channel to be IDLE or detached before suspending */
    while (ch->r_irq) {
	mtx_lock(&ch->state_mtx);
	if (ch->state == ATA_IDLE) {
	    ch->state = ATA_ACTIVE;
	    mtx_unlock(&ch->state_mtx);
	    break;
	}
	mtx_unlock(&ch->state_mtx);
	tsleep(ch, PRIBIO, "atasusp", hz/10);
    }
    ATA_LOCKING(dev, ATA_LF_UNLOCK);
#endif
    return(0);
}

int
ata_resume(device_t dev)
{
    struct ata_channel *ch;
    int error;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
	return ENXIO;

#ifdef ATA_CAM
    mtx_lock(&ch->state_mtx);
    error = ata_reinit(dev);
    xpt_release_simq(ch->sim, TRUE);
    mtx_unlock(&ch->state_mtx);
#else
    /* reinit the devices, we don't know what mode/state they are in */
    error = ata_reinit(dev);
    /* kick off requests on the queue */
    ata_start(dev);
#endif
    if (ch->flags & ATA_PERIODIC_POLL)
	callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
    return error;
}

void
ata_interrupt(void *data)
{
#ifdef ATA_CAM
    struct ata_channel *ch = (struct ata_channel *)data;

    mtx_lock(&ch->state_mtx);
#endif
    ata_interrupt_locked(data);
#ifdef ATA_CAM
    mtx_unlock(&ch->state_mtx);
#endif
}

static void
ata_interrupt_locked(void *data)
{
    struct ata_channel *ch = (struct ata_channel *)data;
    struct ata_request *request;

#ifndef ATA_CAM
    mtx_lock(&ch->state_mtx);
#endif
    do {
	/* ignore interrupt if it's not for us */
	if (ch->hw.status && !ch->hw.status(ch->dev))
	    break;

	/* do we have a running request */
	if (!(request = ch->running))
	    break;

	ATA_DEBUG_RQ(request, "interrupt");

	/* safety check for the right state */
	if (ch->state == ATA_IDLE) {
	    device_printf(request->dev, "interrupt on idle channel ignored\n");
	    break;
	}

	/*
	 * we have the HW locks, so end the transaction for this request
	 * if it finishes immediately, otherwise wait for next interrupt
	 */
	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
	    ch->running = NULL;
	    if (ch->state == ATA_ACTIVE)
		ch->state = ATA_IDLE;
#ifdef ATA_CAM
	    ata_cam_end_transaction(ch->dev, request);
#else
	    mtx_unlock(&ch->state_mtx);
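	    /*
	     * The channel state lock was dropped above; release the
	     * controller-level lock and complete the request without
	     * holding either lock.
	     */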
	    ATA_LOCKING(ch->dev, ATA_LF_UNLOCK);
	    ata_finish(request);
#endif
	    return;
	}
    } while (0);
#ifndef ATA_CAM
    mtx_unlock(&ch->state_mtx);
#endif
}

static void
ata_periodic_poll(void *data)
{
    struct ata_channel *ch = (struct ata_channel *)data;

    callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
    ata_interrupt(ch);
}

void
ata_print_cable(device_t dev, u_int8_t *who)
{
    device_printf(dev,
		  "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
}

int
ata_check_80pin(device_t dev, int mode)
{
    struct ata_device *atadev = device_get_softc(dev);

    if (!ata_dma_check_80pin) {
	if (bootverbose)
	    device_printf(dev, "Skipping 80pin cable check\n");
	return mode;
    }

    if (mode > ATA_UDMA2 && !(atadev->param.hwres & ATA_CABLE_ID)) {
	ata_print_cable(dev, "device");
	mode = ATA_UDMA2;
    }
    return mode;
}

void
ata_setmode(device_t dev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(dev));
    struct ata_device *atadev = device_get_softc(dev);
    int error, mode, pmode;

    mode = atadev->mode;
    do {
	pmode = mode = ata_limit_mode(dev, mode, ATA_DMA_MAX);
	mode = ATA_SETMODE(device_get_parent(dev), atadev->unit, mode);
	if ((ch->flags & (ATA_CHECKS_CABLE | ATA_SATA)) == 0)
	    mode = ata_check_80pin(dev, mode);
    } while (pmode != mode); /* Iterate until successful negotiation. */
    error = ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode);
    if (bootverbose)
	device_printf(dev, "%ssetting %s\n",
		      (error) ? "FAILURE " : "", ata_mode2str(mode));
    atadev->mode = mode;
}

/*
 * device related interfaces
 */
#ifndef ATA_CAM
static int
ata_ioctl(struct cdev *dev, u_long cmd, caddr_t data,
	  int32_t flag, struct thread *td)
{
    device_t device, *children;
    struct ata_ioc_devices *devices = (struct ata_ioc_devices *)data;
    int *value = (int *)data;
    int i, nchildren, error = ENOTTY;

    switch (cmd) {
    case IOCATAGMAXCHANNEL:
	/* In case we have channel 0..n this will return n+1.
*/ 655 *value = devclass_get_maxunit(ata_devclass); 656 error = 0; 657 break; 658 659 case IOCATAREINIT: 660 if (*value >= devclass_get_maxunit(ata_devclass) || 661 !(device = devclass_get_device(ata_devclass, *value)) || 662 !device_is_attached(device)) 663 return ENXIO; 664 error = ata_reinit(device); 665 break; 666 667 case IOCATAATTACH: 668 if (*value >= devclass_get_maxunit(ata_devclass) || 669 !(device = devclass_get_device(ata_devclass, *value)) || 670 !device_is_attached(device)) 671 return ENXIO; 672 error = DEVICE_ATTACH(device); 673 break; 674 675 case IOCATADETACH: 676 if (*value >= devclass_get_maxunit(ata_devclass) || 677 !(device = devclass_get_device(ata_devclass, *value)) || 678 !device_is_attached(device)) 679 return ENXIO; 680 error = DEVICE_DETACH(device); 681 break; 682 683 case IOCATADEVICES: 684 if (devices->channel >= devclass_get_maxunit(ata_devclass) || 685 !(device = devclass_get_device(ata_devclass, devices->channel)) || 686 !device_is_attached(device)) 687 return ENXIO; 688 bzero(devices->name[0], 32); 689 bzero(&devices->params[0], sizeof(struct ata_params)); 690 bzero(devices->name[1], 32); 691 bzero(&devices->params[1], sizeof(struct ata_params)); 692 if (!device_get_children(device, &children, &nchildren)) { 693 for (i = 0; i < nchildren; i++) { 694 if (children[i] && device_is_attached(children[i])) { 695 struct ata_device *atadev = device_get_softc(children[i]); 696 697 if (atadev->unit == ATA_MASTER) { /* XXX SOS PM */ 698 strncpy(devices->name[0], 699 device_get_nameunit(children[i]), 32); 700 bcopy(&atadev->param, &devices->params[0], 701 sizeof(struct ata_params)); 702 } 703 if (atadev->unit == ATA_SLAVE) { /* XXX SOS PM */ 704 strncpy(devices->name[1], 705 device_get_nameunit(children[i]), 32); 706 bcopy(&atadev->param, &devices->params[1], 707 sizeof(struct ata_params)); 708 } 709 } 710 } 711 free(children, M_TEMP); 712 error = 0; 713 } 714 else 715 error = ENODEV; 716 break; 717 718 default: 719 if (ata_raid_ioctl_func) 720 error = ata_raid_ioctl_func(cmd, data); 721 } 722 return error; 723 } 724 #endif 725 726 int 727 ata_device_ioctl(device_t dev, u_long cmd, caddr_t data) 728 { 729 struct ata_device *atadev = device_get_softc(dev); 730 struct ata_channel *ch = device_get_softc(device_get_parent(dev)); 731 struct ata_ioc_request *ioc_request = (struct ata_ioc_request *)data; 732 struct ata_params *params = (struct ata_params *)data; 733 int *mode = (int *)data; 734 struct ata_request *request; 735 caddr_t buf; 736 int error; 737 738 switch (cmd) { 739 case IOCATAREQUEST: 740 if (ioc_request->count > 741 (ch->dma.max_iosize ? 
	    ch->dma.max_iosize : DFLTPHYS)) {
	    return (EFBIG);
	}
	if (!(buf = malloc(ioc_request->count, M_ATA, M_NOWAIT))) {
	    return ENOMEM;
	}
	if (!(request = ata_alloc_request())) {
	    free(buf, M_ATA);
	    return ENOMEM;
	}
	request->dev = atadev->dev;
	if (ioc_request->flags & ATA_CMD_WRITE) {
	    error = copyin(ioc_request->data, buf, ioc_request->count);
	    if (error) {
		free(buf, M_ATA);
		ata_free_request(request);
		return error;
	    }
	}
	if (ioc_request->flags & ATA_CMD_ATAPI) {
	    request->flags = ATA_R_ATAPI;
	    bcopy(ioc_request->u.atapi.ccb, request->u.atapi.ccb, 16);
	}
	else {
	    request->u.ata.command = ioc_request->u.ata.command;
	    request->u.ata.feature = ioc_request->u.ata.feature;
	    request->u.ata.lba = ioc_request->u.ata.lba;
	    request->u.ata.count = ioc_request->u.ata.count;
	}
	request->timeout = ioc_request->timeout;
	request->data = buf;
	request->bytecount = ioc_request->count;
	request->transfersize = request->bytecount;
	if (ioc_request->flags & ATA_CMD_CONTROL)
	    request->flags |= ATA_R_CONTROL;
	if (ioc_request->flags & ATA_CMD_READ)
	    request->flags |= ATA_R_READ;
	if (ioc_request->flags & ATA_CMD_WRITE)
	    request->flags |= ATA_R_WRITE;
	ata_queue_request(request);
	if (request->flags & ATA_R_ATAPI) {
	    bcopy(&request->u.atapi.sense, &ioc_request->u.atapi.sense,
		  sizeof(struct atapi_sense));
	}
	else {
	    ioc_request->u.ata.command = request->u.ata.command;
	    ioc_request->u.ata.feature = request->u.ata.feature;
	    ioc_request->u.ata.lba = request->u.ata.lba;
	    ioc_request->u.ata.count = request->u.ata.count;
	}
	ioc_request->error = request->result;
	if (ioc_request->flags & ATA_CMD_READ)
	    error = copyout(buf, ioc_request->data, ioc_request->count);
	else
	    error = 0;
	free(buf, M_ATA);
	ata_free_request(request);
	return error;

    case IOCATAGPARM:
	ata_getparam(atadev, 0);
	bcopy(&atadev->param, params, sizeof(struct ata_params));
	return 0;

    case IOCATASMODE:
	atadev->mode = *mode;
	ata_setmode(dev);
	return 0;

    case IOCATAGMODE:
	*mode = atadev->mode |
	    (ATA_GETREV(device_get_parent(dev), atadev->unit) << 8);
	return 0;
    case IOCATASSPINDOWN:
	atadev->spindown = *mode;
	return 0;
    case IOCATAGSPINDOWN:
	*mode = atadev->spindown;
	return 0;
    default:
	return ENOTTY;
    }
}

#ifndef ATA_CAM
static void
ata_boot_attach(void)
{
    struct ata_channel *ch;
    int ctlr;

    mtx_lock(&Giant);	/* newbus suckage it needs Giant */

    /* kick off probe and attach on all channels */
    for (ctlr = 0; ctlr < devclass_get_maxunit(ata_devclass); ctlr++) {
	if ((ch = devclass_get_softc(ata_devclass, ctlr))) {
	    ata_identify(ch->dev);
	}
    }

    /* release the hook that got us here, we are only needed once during boot */
    if (ata_delayed_attach) {
	config_intrhook_disestablish(ata_delayed_attach);
	free(ata_delayed_attach, M_TEMP);
	ata_delayed_attach = NULL;
    }

    mtx_unlock(&Giant);	/* newbus suckage dealt with, release Giant */
}
#endif

/*
 * misc support functions
 */
#ifndef ATA_CAM
static device_t
ata_add_child(device_t parent, struct ata_device *atadev, int unit)
{
    device_t child;

    if ((child = device_add_child(parent, NULL, unit))) {
	device_set_softc(child, atadev);
	device_quiet(child);
	atadev->dev = child;
	atadev->max_iosize = DEV_BSIZE;
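	/*
	 * New children start out conservatively at maximum PIO;
	 * ata_getparam() may later raise the mode to ATA_DMA_MAX once the
	 * IDENTIFY data shows the device supports WDMA/UDMA.
	 */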
866 atadev->mode = ATA_PIO_MAX; 867 } 868 return child; 869 } 870 #endif 871 872 int 873 ata_getparam(struct ata_device *atadev, int init) 874 { 875 struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev)); 876 struct ata_request *request; 877 const char *res; 878 char buf[64]; 879 u_int8_t command = 0; 880 int error = ENOMEM, retries = 2, mode = -1; 881 882 if (ch->devices & (ATA_ATA_MASTER << atadev->unit)) 883 command = ATA_ATA_IDENTIFY; 884 if (ch->devices & (ATA_ATAPI_MASTER << atadev->unit)) 885 command = ATA_ATAPI_IDENTIFY; 886 if (!command) 887 return ENXIO; 888 889 while (retries-- > 0 && error) { 890 if (!(request = ata_alloc_request())) 891 break; 892 request->dev = atadev->dev; 893 request->timeout = 1; 894 request->retries = 0; 895 request->u.ata.command = command; 896 request->flags = (ATA_R_READ|ATA_R_AT_HEAD|ATA_R_DIRECT); 897 if (!bootverbose) 898 request->flags |= ATA_R_QUIET; 899 request->data = (void *)&atadev->param; 900 request->bytecount = sizeof(struct ata_params); 901 request->donecount = 0; 902 request->transfersize = DEV_BSIZE; 903 ata_queue_request(request); 904 error = request->result; 905 ata_free_request(request); 906 } 907 908 if (!error && (isprint(atadev->param.model[0]) || 909 isprint(atadev->param.model[1]))) { 910 struct ata_params *atacap = &atadev->param; 911 int16_t *ptr; 912 913 for (ptr = (int16_t *)atacap; 914 ptr < (int16_t *)atacap + sizeof(struct ata_params)/2; ptr++) { 915 *ptr = le16toh(*ptr); 916 } 917 if (!(!strncmp(atacap->model, "FX", 2) || 918 !strncmp(atacap->model, "NEC", 3) || 919 !strncmp(atacap->model, "Pioneer", 7) || 920 !strncmp(atacap->model, "SHARP", 5))) { 921 bswap(atacap->model, sizeof(atacap->model)); 922 bswap(atacap->revision, sizeof(atacap->revision)); 923 bswap(atacap->serial, sizeof(atacap->serial)); 924 } 925 btrim(atacap->model, sizeof(atacap->model)); 926 bpack(atacap->model, atacap->model, sizeof(atacap->model)); 927 btrim(atacap->revision, sizeof(atacap->revision)); 928 bpack(atacap->revision, atacap->revision, sizeof(atacap->revision)); 929 btrim(atacap->serial, sizeof(atacap->serial)); 930 bpack(atacap->serial, atacap->serial, sizeof(atacap->serial)); 931 932 if (bootverbose) 933 printf("ata%d-%s: pio=%s wdma=%s udma=%s cable=%s wire\n", 934 device_get_unit(ch->dev), 935 ata_unit2str(atadev), 936 ata_mode2str(ata_pmode(atacap)), 937 ata_mode2str(ata_wmode(atacap)), 938 ata_mode2str(ata_umode(atacap)), 939 (atacap->hwres & ATA_CABLE_ID) ? 
"80":"40"); 940 941 if (init) { 942 char buffer[64]; 943 944 sprintf(buffer, "%.40s/%.8s", atacap->model, atacap->revision); 945 device_set_desc_copy(atadev->dev, buffer); 946 if ((atadev->param.config & ATA_PROTO_ATAPI) && 947 (atadev->param.config != ATA_CFA_MAGIC1) && 948 (atadev->param.config != ATA_CFA_MAGIC2)) { 949 if (atapi_dma && 950 (atadev->param.config & ATA_DRQ_MASK) != ATA_DRQ_INTR && 951 ata_umode(&atadev->param) >= ATA_UDMA2) 952 atadev->mode = ATA_DMA_MAX; 953 } 954 else { 955 if (ata_dma && 956 (ata_umode(&atadev->param) > 0 || 957 ata_wmode(&atadev->param) > 0)) 958 atadev->mode = ATA_DMA_MAX; 959 } 960 snprintf(buf, sizeof(buf), "dev%d.mode", atadev->unit); 961 if (resource_string_value(device_get_name(ch->dev), 962 device_get_unit(ch->dev), buf, &res) == 0) 963 mode = ata_str2mode(res); 964 else if (resource_string_value(device_get_name(ch->dev), 965 device_get_unit(ch->dev), "mode", &res) == 0) 966 mode = ata_str2mode(res); 967 if (mode >= 0) 968 atadev->mode = mode; 969 } 970 } 971 else { 972 if (!error) 973 error = ENXIO; 974 } 975 return error; 976 } 977 978 #ifndef ATA_CAM 979 int 980 ata_identify(device_t dev) 981 { 982 struct ata_channel *ch = device_get_softc(dev); 983 struct ata_device *atadev; 984 device_t *children; 985 device_t child, master = NULL; 986 int nchildren, i, n = ch->devices; 987 988 if (bootverbose) 989 device_printf(dev, "Identifying devices: %08x\n", ch->devices); 990 991 mtx_lock(&Giant); 992 /* Skip existing devices. */ 993 if (!device_get_children(dev, &children, &nchildren)) { 994 for (i = 0; i < nchildren; i++) { 995 if (children[i] && (atadev = device_get_softc(children[i]))) 996 n &= ~((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << atadev->unit); 997 } 998 free(children, M_TEMP); 999 } 1000 /* Create new devices. */ 1001 if (bootverbose) 1002 device_printf(dev, "New devices: %08x\n", n); 1003 if (n == 0) { 1004 mtx_unlock(&Giant); 1005 return (0); 1006 } 1007 for (i = 0; i < ATA_PM; ++i) { 1008 if (n & (((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << i))) { 1009 int unit = -1; 1010 1011 if (!(atadev = malloc(sizeof(struct ata_device), 1012 M_ATA, M_NOWAIT | M_ZERO))) { 1013 device_printf(dev, "out of memory\n"); 1014 return ENOMEM; 1015 } 1016 atadev->unit = i; 1017 #ifdef ATA_STATIC_ID 1018 if (n & (ATA_ATA_MASTER << i)) 1019 unit = (device_get_unit(dev) << 1) + i; 1020 #endif 1021 if ((child = ata_add_child(dev, atadev, unit))) { 1022 /* 1023 * PATA slave should be identified first, to allow 1024 * device cable detection on master to work properly. 
		 */
		if (i == 0 && (n & ATA_PORTMULTIPLIER) == 0 &&
		    (n & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) << 1)) != 0) {
		    master = child;
		    continue;
		}
		if (ata_getparam(atadev, 1)) {
		    device_delete_child(dev, child);
		    free(atadev, M_ATA);
		}
	    }
	    else
		free(atadev, M_ATA);
	}
    }
    if (master) {
	atadev = device_get_softc(master);
	if (ata_getparam(atadev, 1)) {
	    device_delete_child(dev, master);
	    free(atadev, M_ATA);
	}
    }
    bus_generic_probe(dev);
    bus_generic_attach(dev);
    mtx_unlock(&Giant);
    return 0;
}
#endif

void
ata_default_registers(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    /* fill in the defaults from what's set up already */
    ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
    ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
    ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
    ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
    ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
    ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
    ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
    ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}

void
ata_modify_if_48bit(struct ata_request *request)
{
    struct ata_channel *ch = device_get_softc(request->parent);
    struct ata_device *atadev = device_get_softc(request->dev);

    request->flags &= ~ATA_R_48BIT;

    if (((request->u.ata.lba + request->u.ata.count) >= ATA_MAX_28BIT_LBA ||
	 request->u.ata.count > 256) &&
	atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

	/* translate command into 48bit version */
	switch (request->u.ata.command) {
	case ATA_READ:
	    request->u.ata.command = ATA_READ48;
	    break;
	case ATA_READ_MUL:
	    request->u.ata.command = ATA_READ_MUL48;
	    break;
	case ATA_READ_DMA:
	    if (ch->flags & ATA_NO_48BIT_DMA) {
		if (request->transfersize > DEV_BSIZE)
		    request->u.ata.command = ATA_READ_MUL48;
		else
		    request->u.ata.command = ATA_READ48;
		request->flags &= ~ATA_R_DMA;
	    }
	    else
		request->u.ata.command = ATA_READ_DMA48;
	    break;
	case ATA_READ_DMA_QUEUED:
	    if (ch->flags & ATA_NO_48BIT_DMA) {
		if (request->transfersize > DEV_BSIZE)
		    request->u.ata.command = ATA_READ_MUL48;
		else
		    request->u.ata.command = ATA_READ48;
		request->flags &= ~ATA_R_DMA;
	    }
	    else
		request->u.ata.command = ATA_READ_DMA_QUEUED48;
	    break;
	case ATA_WRITE:
	    request->u.ata.command = ATA_WRITE48;
	    break;
	case ATA_WRITE_MUL:
	    request->u.ata.command = ATA_WRITE_MUL48;
	    break;
	case ATA_WRITE_DMA:
	    if (ch->flags & ATA_NO_48BIT_DMA) {
		if (request->transfersize > DEV_BSIZE)
		    request->u.ata.command = ATA_WRITE_MUL48;
		else
		    request->u.ata.command = ATA_WRITE48;
		request->flags &= ~ATA_R_DMA;
	    }
	    else
		request->u.ata.command = ATA_WRITE_DMA48;
	    break;
	case ATA_WRITE_DMA_QUEUED:
	    if (ch->flags & ATA_NO_48BIT_DMA) {
		if (request->transfersize > DEV_BSIZE)
		    request->u.ata.command = ATA_WRITE_MUL48;
		else
		    request->u.ata.command = ATA_WRITE48;
		request->flags &= ~ATA_R_DMA;
	    }
	    else
		request->u.ata.command = ATA_WRITE_DMA_QUEUED48;
	    break;
	case ATA_FLUSHCACHE:
	    request->u.ata.command =
		ATA_FLUSHCACHE48;
	    break;
	case ATA_SET_MAX_ADDRESS:
	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
	    break;
	default:
	    return;
	}
	request->flags |= ATA_R_48BIT;
    }
    else if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) {

	/* translate command into 48bit version */
	switch (request->u.ata.command) {
	case ATA_FLUSHCACHE:
	    request->u.ata.command = ATA_FLUSHCACHE48;
	    break;
	case ATA_READ_NATIVE_MAX_ADDRESS:
	    request->u.ata.command = ATA_READ_NATIVE_MAX_ADDRESS48;
	    break;
	case ATA_SET_MAX_ADDRESS:
	    request->u.ata.command = ATA_SET_MAX_ADDRESS48;
	    break;
	default:
	    return;
	}
	request->flags |= ATA_R_48BIT;
    }
}

void
ata_udelay(int interval)
{
    /* for now just use DELAY, the timer/sleep subsystems are not there yet */
    if (1 || interval < (1000000/hz) || ata_delayed_attach)
	DELAY(interval);
    else
	pause("ataslp", interval/(1000000/hz));
}

char *
ata_unit2str(struct ata_device *atadev)
{
    struct ata_channel *ch = device_get_softc(device_get_parent(atadev->dev));
    static char str[8];

    if (ch->devices & ATA_PORTMULTIPLIER)
	sprintf(str, "port%d", atadev->unit);
    else
	sprintf(str, "%s", atadev->unit == ATA_MASTER ? "master" : "slave");
    return str;
}

const char *
ata_mode2str(int mode)
{
    switch (mode) {
    case -1: return "UNSUPPORTED";
    case ATA_PIO0: return "PIO0";
    case ATA_PIO1: return "PIO1";
    case ATA_PIO2: return "PIO2";
    case ATA_PIO3: return "PIO3";
    case ATA_PIO4: return "PIO4";
    case ATA_WDMA0: return "WDMA0";
    case ATA_WDMA1: return "WDMA1";
    case ATA_WDMA2: return "WDMA2";
    case ATA_UDMA0: return "UDMA16";
    case ATA_UDMA1: return "UDMA25";
    case ATA_UDMA2: return "UDMA33";
    case ATA_UDMA3: return "UDMA40";
    case ATA_UDMA4: return "UDMA66";
    case ATA_UDMA5: return "UDMA100";
    case ATA_UDMA6: return "UDMA133";
    case ATA_SA150: return "SATA150";
    case ATA_SA300: return "SATA300";
    default:
	if (mode & ATA_DMA_MASK)
	    return "BIOSDMA";
	else
	    return "BIOSPIO";
    }
}

int
ata_str2mode(const char *str)
{

    if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
    if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
    if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
    if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
    if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
    if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
    if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
    if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
    if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
    if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
    if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
    if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
    if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
    if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
    if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
    if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
    if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
    if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
    if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
    if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
    if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
    if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
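    /*
     * Both the raw mode names (UDMA0..6) and the speed-based aliases
     * (UDMA16..133) are accepted above; any other string is treated as
     * unsupported.
     */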
return (-1); 1252 } 1253 1254 const char * 1255 ata_satarev2str(int rev) 1256 { 1257 switch (rev) { 1258 case 0: return ""; 1259 case 1: return "SATA 1.5Gb/s"; 1260 case 2: return "SATA 3Gb/s"; 1261 case 3: return "SATA 6Gb/s"; 1262 case 0xff: return "SATA"; 1263 default: return "???"; 1264 } 1265 } 1266 1267 int 1268 ata_atapi(device_t dev, int target) 1269 { 1270 struct ata_channel *ch = device_get_softc(dev); 1271 1272 return (ch->devices & (ATA_ATAPI_MASTER << target)); 1273 } 1274 1275 int 1276 ata_pmode(struct ata_params *ap) 1277 { 1278 if (ap->atavalid & ATA_FLAG_64_70) { 1279 if (ap->apiomodes & 0x02) 1280 return ATA_PIO4; 1281 if (ap->apiomodes & 0x01) 1282 return ATA_PIO3; 1283 } 1284 if (ap->mwdmamodes & 0x04) 1285 return ATA_PIO4; 1286 if (ap->mwdmamodes & 0x02) 1287 return ATA_PIO3; 1288 if (ap->mwdmamodes & 0x01) 1289 return ATA_PIO2; 1290 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x200) 1291 return ATA_PIO2; 1292 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x100) 1293 return ATA_PIO1; 1294 if ((ap->retired_piomode & ATA_RETIRED_PIO_MASK) == 0x000) 1295 return ATA_PIO0; 1296 return ATA_PIO0; 1297 } 1298 1299 int 1300 ata_wmode(struct ata_params *ap) 1301 { 1302 if (ap->mwdmamodes & 0x04) 1303 return ATA_WDMA2; 1304 if (ap->mwdmamodes & 0x02) 1305 return ATA_WDMA1; 1306 if (ap->mwdmamodes & 0x01) 1307 return ATA_WDMA0; 1308 return -1; 1309 } 1310 1311 int 1312 ata_umode(struct ata_params *ap) 1313 { 1314 if (ap->atavalid & ATA_FLAG_88) { 1315 if (ap->udmamodes & 0x40) 1316 return ATA_UDMA6; 1317 if (ap->udmamodes & 0x20) 1318 return ATA_UDMA5; 1319 if (ap->udmamodes & 0x10) 1320 return ATA_UDMA4; 1321 if (ap->udmamodes & 0x08) 1322 return ATA_UDMA3; 1323 if (ap->udmamodes & 0x04) 1324 return ATA_UDMA2; 1325 if (ap->udmamodes & 0x02) 1326 return ATA_UDMA1; 1327 if (ap->udmamodes & 0x01) 1328 return ATA_UDMA0; 1329 } 1330 return -1; 1331 } 1332 1333 int 1334 ata_limit_mode(device_t dev, int mode, int maxmode) 1335 { 1336 struct ata_device *atadev = device_get_softc(dev); 1337 1338 if (maxmode && mode > maxmode) 1339 mode = maxmode; 1340 1341 if (mode >= ATA_UDMA0 && ata_umode(&atadev->param) > 0) 1342 return min(mode, ata_umode(&atadev->param)); 1343 1344 if (mode >= ATA_WDMA0 && ata_wmode(&atadev->param) > 0) 1345 return min(mode, ata_wmode(&atadev->param)); 1346 1347 if (mode > ata_pmode(&atadev->param)) 1348 return min(mode, ata_pmode(&atadev->param)); 1349 1350 return mode; 1351 } 1352 1353 static void 1354 bswap(int8_t *buf, int len) 1355 { 1356 u_int16_t *ptr = (u_int16_t*)(buf + len); 1357 1358 while (--ptr >= (u_int16_t*)buf) 1359 *ptr = ntohs(*ptr); 1360 } 1361 1362 static void 1363 btrim(int8_t *buf, int len) 1364 { 1365 int8_t *ptr; 1366 1367 for (ptr = buf; ptr < buf+len; ++ptr) 1368 if (!*ptr || *ptr == '_') 1369 *ptr = ' '; 1370 for (ptr = buf + len - 1; ptr >= buf && *ptr == ' '; --ptr) 1371 *ptr = 0; 1372 } 1373 1374 static void 1375 bpack(int8_t *src, int8_t *dst, int len) 1376 { 1377 int i, j, blank; 1378 1379 for (i = j = blank = 0 ; i < len; i++) { 1380 if (blank && src[i] == ' ') continue; 1381 if (blank && src[i] != ' ') { 1382 dst[j++] = src[i]; 1383 blank = 0; 1384 continue; 1385 } 1386 if (src[i] == ' ') { 1387 blank = 1; 1388 if (i == 0) 1389 continue; 1390 } 1391 dst[j++] = src[i]; 1392 } 1393 if (j < len) 1394 dst[j] = 0x00; 1395 } 1396 1397 #ifdef ATA_CAM 1398 void 1399 ata_cam_begin_transaction(device_t dev, union ccb *ccb) 1400 { 1401 struct ata_channel *ch = device_get_softc(dev); 1402 struct ata_request *request; 1403 1404 if 
(!(request = ata_alloc_request())) { 1405 device_printf(dev, "FAILURE - out of memory in start\n"); 1406 ccb->ccb_h.status = CAM_REQ_INVALID; 1407 xpt_done(ccb); 1408 return; 1409 } 1410 bzero(request, sizeof(*request)); 1411 1412 /* setup request */ 1413 request->dev = NULL; 1414 request->parent = dev; 1415 request->unit = ccb->ccb_h.target_id; 1416 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1417 request->data = ccb->ataio.data_ptr; 1418 request->bytecount = ccb->ataio.dxfer_len; 1419 request->u.ata.command = ccb->ataio.cmd.command; 1420 request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) | 1421 (uint16_t)ccb->ataio.cmd.features; 1422 request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) | 1423 (uint16_t)ccb->ataio.cmd.sector_count; 1424 if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) { 1425 request->flags |= ATA_R_48BIT; 1426 request->u.ata.lba = 1427 ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) | 1428 ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) | 1429 ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24); 1430 } else { 1431 request->u.ata.lba = 1432 ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24); 1433 } 1434 request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) | 1435 ((uint64_t)ccb->ataio.cmd.lba_mid << 8) | 1436 (uint64_t)ccb->ataio.cmd.lba_low; 1437 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1438 ccb->ataio.cmd.flags & CAM_ATAIO_DMA) 1439 request->flags |= ATA_R_DMA; 1440 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1441 request->flags |= ATA_R_READ; 1442 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) 1443 request->flags |= ATA_R_WRITE; 1444 } else { 1445 request->data = ccb->csio.data_ptr; 1446 request->bytecount = ccb->csio.dxfer_len; 1447 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
1448 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, 1449 request->u.atapi.ccb, ccb->csio.cdb_len); 1450 request->flags |= ATA_R_ATAPI; 1451 if (ch->curr[ccb->ccb_h.target_id].atapi == 16) 1452 request->flags |= ATA_R_ATAPI16; 1453 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1454 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) 1455 request->flags |= ATA_R_DMA; 1456 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 1457 request->flags |= ATA_R_READ; 1458 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) 1459 request->flags |= ATA_R_WRITE; 1460 } 1461 request->transfersize = min(request->bytecount, 1462 ch->curr[ccb->ccb_h.target_id].bytecount); 1463 request->retries = 0; 1464 request->timeout = (ccb->ccb_h.timeout + 999) / 1000; 1465 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); 1466 request->ccb = ccb; 1467 1468 ch->running = request; 1469 ch->state = ATA_ACTIVE; 1470 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { 1471 ch->running = NULL; 1472 ch->state = ATA_IDLE; 1473 ata_cam_end_transaction(dev, request); 1474 return; 1475 } 1476 } 1477 1478 void 1479 ata_cam_end_transaction(device_t dev, struct ata_request *request) 1480 { 1481 struct ata_channel *ch = device_get_softc(dev); 1482 union ccb *ccb = request->ccb; 1483 int fatalerr = 0; 1484 1485 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1486 if (request->flags & ATA_R_TIMEOUT) { 1487 xpt_freeze_simq(ch->sim, 1); 1488 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1489 ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ; 1490 fatalerr = 1; 1491 } else if (request->status & ATA_S_ERROR) { 1492 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1493 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; 1494 } else { 1495 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1496 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1497 } 1498 } else if (request->result == ERESTART) 1499 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1500 else if (request->result != 0) 1501 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1502 else 1503 ccb->ccb_h.status |= CAM_REQ_CMP; 1504 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && 1505 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 1506 xpt_freeze_devq(ccb->ccb_h.path, 1); 1507 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1508 } 1509 if (ccb->ccb_h.func_code == XPT_ATA_IO && 1510 ((request->status & ATA_S_ERROR) || 1511 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) { 1512 struct ata_res *res = &ccb->ataio.res; 1513 res->status = request->status; 1514 res->error = request->error; 1515 res->lba_low = request->u.ata.lba; 1516 res->lba_mid = request->u.ata.lba >> 8; 1517 res->lba_high = request->u.ata.lba >> 16; 1518 res->device = request->u.ata.lba >> 24; 1519 res->lba_low_exp = request->u.ata.lba >> 24; 1520 res->lba_mid_exp = request->u.ata.lba >> 32; 1521 res->lba_high_exp = request->u.ata.lba >> 40; 1522 res->sector_count = request->u.ata.count; 1523 res->sector_count_exp = request->u.ata.count >> 8; 1524 } 1525 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1526 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1527 ccb->ataio.resid = 1528 ccb->ataio.dxfer_len - request->donecount; 1529 } else { 1530 ccb->csio.resid = 1531 ccb->csio.dxfer_len - request->donecount; 1532 } 1533 } 1534 ata_free_request(request); 1535 xpt_done(ccb); 1536 /* Do error recovery if needed. 
*/ 1537 if (fatalerr) 1538 ata_reinit(dev); 1539 } 1540 1541 static int 1542 ata_check_ids(device_t dev, union ccb *ccb) 1543 { 1544 struct ata_channel *ch = device_get_softc(dev); 1545 1546 if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) { 1547 ccb->ccb_h.status = CAM_TID_INVALID; 1548 xpt_done(ccb); 1549 return (-1); 1550 } 1551 if (ccb->ccb_h.target_lun != 0) { 1552 ccb->ccb_h.status = CAM_LUN_INVALID; 1553 xpt_done(ccb); 1554 return (-1); 1555 } 1556 return (0); 1557 } 1558 1559 static void 1560 ataaction(struct cam_sim *sim, union ccb *ccb) 1561 { 1562 device_t dev, parent; 1563 struct ata_channel *ch; 1564 1565 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n", 1566 ccb->ccb_h.func_code)); 1567 1568 ch = (struct ata_channel *)cam_sim_softc(sim); 1569 dev = ch->dev; 1570 switch (ccb->ccb_h.func_code) { 1571 /* Common cases first */ 1572 case XPT_ATA_IO: /* Execute the requested I/O operation */ 1573 case XPT_SCSI_IO: 1574 if (ata_check_ids(dev, ccb)) 1575 return; 1576 if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) 1577 << ccb->ccb_h.target_id)) == 0) { 1578 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 1579 break; 1580 } 1581 if (ch->running) 1582 device_printf(dev, "already running!\n"); 1583 if (ccb->ccb_h.func_code == XPT_ATA_IO && 1584 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1585 (ccb->ataio.cmd.control & ATA_A_RESET)) { 1586 struct ata_res *res = &ccb->ataio.res; 1587 1588 bzero(res, sizeof(*res)); 1589 if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) { 1590 res->lba_high = 0; 1591 res->lba_mid = 0; 1592 } else { 1593 res->lba_high = 0xeb; 1594 res->lba_mid = 0x14; 1595 } 1596 ccb->ccb_h.status = CAM_REQ_CMP; 1597 break; 1598 } 1599 ata_cam_begin_transaction(dev, ccb); 1600 return; 1601 case XPT_EN_LUN: /* Enable LUN as a target */ 1602 case XPT_TARGET_IO: /* Execute target I/O request */ 1603 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 1604 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ 1605 case XPT_ABORT: /* Abort the specified CCB */ 1606 /* XXX Implement */ 1607 ccb->ccb_h.status = CAM_REQ_INVALID; 1608 break; 1609 case XPT_SET_TRAN_SETTINGS: 1610 { 1611 struct ccb_trans_settings *cts = &ccb->cts; 1612 struct ata_cam_device *d; 1613 1614 if (ata_check_ids(dev, ccb)) 1615 return; 1616 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 1617 d = &ch->curr[ccb->ccb_h.target_id]; 1618 else 1619 d = &ch->user[ccb->ccb_h.target_id]; 1620 if (ch->flags & ATA_SATA) { 1621 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) 1622 d->revision = cts->xport_specific.sata.revision; 1623 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) { 1624 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 1625 d->mode = ATA_SETMODE(ch->dev, 1626 ccb->ccb_h.target_id, 1627 cts->xport_specific.sata.mode); 1628 } else 1629 d->mode = cts->xport_specific.sata.mode; 1630 } 1631 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) 1632 d->bytecount = min(8192, cts->xport_specific.sata.bytecount); 1633 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) 1634 d->atapi = cts->xport_specific.sata.atapi; 1635 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) 1636 d->caps = cts->xport_specific.sata.caps; 1637 } else { 1638 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) { 1639 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 1640 d->mode = ATA_SETMODE(ch->dev, 1641 ccb->ccb_h.target_id, 1642 cts->xport_specific.ata.mode); 1643 } else 1644 d->mode = cts->xport_specific.ata.mode; 1645 } 1646 
if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) 1647 d->bytecount = cts->xport_specific.ata.bytecount; 1648 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI) 1649 d->atapi = cts->xport_specific.ata.atapi; 1650 } 1651 ccb->ccb_h.status = CAM_REQ_CMP; 1652 break; 1653 } 1654 case XPT_GET_TRAN_SETTINGS: 1655 { 1656 struct ccb_trans_settings *cts = &ccb->cts; 1657 struct ata_cam_device *d; 1658 1659 if (ata_check_ids(dev, ccb)) 1660 return; 1661 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 1662 d = &ch->curr[ccb->ccb_h.target_id]; 1663 else 1664 d = &ch->user[ccb->ccb_h.target_id]; 1665 cts->protocol = PROTO_ATA; 1666 cts->protocol_version = PROTO_VERSION_UNSPECIFIED; 1667 if (ch->flags & ATA_SATA) { 1668 cts->transport = XPORT_SATA; 1669 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 1670 cts->xport_specific.sata.valid = 0; 1671 cts->xport_specific.sata.mode = d->mode; 1672 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; 1673 cts->xport_specific.sata.bytecount = d->bytecount; 1674 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; 1675 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 1676 cts->xport_specific.sata.revision = 1677 ATA_GETREV(dev, ccb->ccb_h.target_id); 1678 if (cts->xport_specific.sata.revision != 0xff) { 1679 cts->xport_specific.sata.valid |= 1680 CTS_SATA_VALID_REVISION; 1681 } 1682 cts->xport_specific.sata.caps = 1683 d->caps & CTS_SATA_CAPS_D; 1684 if (ch->pm_level) { 1685 cts->xport_specific.sata.caps |= 1686 CTS_SATA_CAPS_H_PMREQ; 1687 } 1688 cts->xport_specific.sata.caps &= 1689 ch->user[ccb->ccb_h.target_id].caps; 1690 cts->xport_specific.sata.valid |= 1691 CTS_SATA_VALID_CAPS; 1692 } else { 1693 cts->xport_specific.sata.revision = d->revision; 1694 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; 1695 cts->xport_specific.sata.caps = d->caps; 1696 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 1697 } 1698 cts->xport_specific.sata.atapi = d->atapi; 1699 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; 1700 } else { 1701 cts->transport = XPORT_ATA; 1702 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 1703 cts->xport_specific.ata.valid = 0; 1704 cts->xport_specific.ata.mode = d->mode; 1705 cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE; 1706 cts->xport_specific.ata.bytecount = d->bytecount; 1707 cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT; 1708 cts->xport_specific.ata.atapi = d->atapi; 1709 cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI; 1710 } 1711 ccb->ccb_h.status = CAM_REQ_CMP; 1712 break; 1713 } 1714 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 1715 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 1716 ata_reinit(dev); 1717 ccb->ccb_h.status = CAM_REQ_CMP; 1718 break; 1719 case XPT_TERM_IO: /* Terminate the I/O process */ 1720 /* XXX Implement */ 1721 ccb->ccb_h.status = CAM_REQ_INVALID; 1722 break; 1723 case XPT_PATH_INQ: /* Path routing inquiry */ 1724 { 1725 struct ccb_pathinq *cpi = &ccb->cpi; 1726 1727 parent = device_get_parent(dev); 1728 cpi->version_num = 1; /* XXX??? 
	 */
	cpi->hba_inquiry = PI_SDTR_ABLE;
	cpi->target_sprt = 0;
	cpi->hba_misc = PIM_SEQSCAN;
	cpi->hba_eng_cnt = 0;
	if (ch->flags & ATA_NO_SLAVE)
	    cpi->max_target = 0;
	else
	    cpi->max_target = 1;
	cpi->max_lun = 0;
	cpi->initiator_id = 0;
	cpi->bus_id = cam_sim_bus(sim);
	if (ch->flags & ATA_SATA)
	    cpi->base_transfer_speed = 150000;
	else
	    cpi->base_transfer_speed = 3300;
	strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
	strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
	strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
	cpi->unit_number = cam_sim_unit(sim);
	if (ch->flags & ATA_SATA)
	    cpi->transport = XPORT_SATA;
	else
	    cpi->transport = XPORT_ATA;
	cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
	cpi->protocol = PROTO_ATA;
	cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
	cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
	if (device_get_devclass(device_get_parent(parent)) ==
	    devclass_find("pci")) {
	    cpi->hba_vendor = pci_get_vendor(parent);
	    cpi->hba_device = pci_get_device(parent);
	    cpi->hba_subvendor = pci_get_subvendor(parent);
	    cpi->hba_subdevice = pci_get_subdevice(parent);
	}
	cpi->ccb_h.status = CAM_REQ_CMP;
	break;
    }
    default:
	ccb->ccb_h.status = CAM_REQ_INVALID;
	break;
    }
    xpt_done(ccb);
}

static void
atapoll(struct cam_sim *sim)
{
    struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);

    ata_interrupt_locked(ch);
}
#endif

/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{
#ifndef ATA_CAM
    static struct cdev *atacdev;
#endif

    switch (what) {
    case MOD_LOAD:
#ifndef ATA_CAM
	/* register controlling device */
	atacdev = make_dev(&ata_cdevsw, 0, UID_ROOT, GID_OPERATOR, 0600, "ata");

	if (cold) {
	    /* register boot attach to be run when interrupts are enabled */
	    if (!(ata_delayed_attach = (struct intr_config_hook *)
				       malloc(sizeof(struct intr_config_hook),
					      M_TEMP, M_NOWAIT | M_ZERO))) {
		printf("ata: malloc of delayed attach hook failed\n");
		return EIO;
	    }
	    ata_delayed_attach->ich_func = (void*)ata_boot_attach;
	    if (config_intrhook_establish(ata_delayed_attach) != 0) {
		printf("ata: config_intrhook_establish failed\n");
		free(ata_delayed_attach, M_TEMP);
	    }
	}
#endif
	return 0;

    case MOD_UNLOAD:
#ifndef ATA_CAM
	/* deregister controlling device */
	destroy_dev(atacdev);
#endif
	return 0;

    default:
	return EOPNOTSUPP;
    }
}

static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(ata, 1);
#ifdef ATA_CAM
MODULE_DEPEND(ata, cam, 1, 1, 1);
#endif

static void
ata_init(void)
{
    ata_request_zone = uma_zcreate("ata_request", sizeof(struct ata_request),
				   NULL, NULL, NULL, NULL, 0, 0);
    ata_composite_zone = uma_zcreate("ata_composite",
				     sizeof(struct ata_composite),
				     NULL, NULL, NULL, NULL, 0, 0);
}
SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);

static void
ata_uninit(void)
{
    uma_zdestroy(ata_composite_zone);
    uma_zdestroy(ata_request_zone);
}
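/*
 * The request and composite UMA zones are created by the SYSINIT above at
 * SI_SUB_DRIVERS time, before any controller attach can allocate a request;
 * the matching SYSUNINIT below tears them down again when the driver is
 * unloaded.
 */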
SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL); 1852
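/*
 * Illustrative only, never compiled into the kernel: a minimal userland
 * sketch of how the legacy (non-ATA_CAM) control device created above can be
 * queried, assuming the IOCATAGMAXCHANNEL/IOCATADEVICES ioctls and
 * struct ata_ioc_devices from <sys/ata.h>.  This mirrors what atacontrol(8)
 * does when listing channels.
 */
#if 0
#include <sys/types.h>
#include <sys/ata.h>
#include <sys/ioctl.h>
#include <err.h>
#include <fcntl.h>
#include <stdio.h>

int
main(void)
{
    struct ata_ioc_devices devices;
    int fd, maxchannel, channel;

    /* The control node "/dev/ata" is created in ata_module_event_handler(). */
    if ((fd = open("/dev/ata", O_RDWR)) < 0)
	err(1, "open(/dev/ata)");
    /* IOCATAGMAXCHANNEL returns n+1 for channels 0..n. */
    if (ioctl(fd, IOCATAGMAXCHANNEL, &maxchannel) < 0)
	err(1, "IOCATAGMAXCHANNEL");
    for (channel = 0; channel < maxchannel; channel++) {
	devices.channel = channel;
	/* IOCATADEVICES fails with ENXIO if the channel is not attached. */
	if (ioctl(fd, IOCATADEVICES, &devices) < 0)
	    continue;
	if (devices.name[0][0] != '\0')
	    printf("ata%d master: %s\n", channel, devices.name[0]);
	if (devices.name[1][0] != '\0')
	    printf("ata%d slave:  %s\n", channel, devices.name[1]);
    }
    return (0);
}
#endif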