1 /*- 2 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/systm.h> 32 #include <sys/ata.h> 33 #include <sys/kernel.h> 34 #include <sys/module.h> 35 #include <sys/endian.h> 36 #include <sys/ctype.h> 37 #include <sys/conf.h> 38 #include <sys/bus.h> 39 #include <sys/bio.h> 40 #include <sys/malloc.h> 41 #include <sys/sysctl.h> 42 #include <sys/sema.h> 43 #include <sys/taskqueue.h> 44 #include <vm/uma.h> 45 #include <machine/stdarg.h> 46 #include <machine/resource.h> 47 #include <machine/bus.h> 48 #include <sys/rman.h> 49 #include <dev/ata/ata-all.h> 50 #include <dev/pci/pcivar.h> 51 #include <ata_if.h> 52 53 #include <cam/cam.h> 54 #include <cam/cam_ccb.h> 55 #include <cam/cam_sim.h> 56 #include <cam/cam_xpt_sim.h> 57 #include <cam/cam_debug.h> 58 59 /* prototypes */ 60 static void ataaction(struct cam_sim *sim, union ccb *ccb); 61 static void atapoll(struct cam_sim *sim); 62 static void ata_cam_begin_transaction(device_t dev, union ccb *ccb); 63 static void ata_cam_end_transaction(device_t dev, struct ata_request *request); 64 static void ata_cam_request_sense(device_t dev, struct ata_request *request); 65 static int ata_check_ids(device_t dev, union ccb *ccb); 66 static void ata_conn_event(void *context, int dummy); 67 static void ata_interrupt_locked(void *data); 68 static int ata_module_event_handler(module_t mod, int what, void *arg); 69 static void ata_periodic_poll(void *data); 70 static int ata_str2mode(const char *str); 71 72 /* global vars */ 73 MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer"); 74 int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL; 75 devclass_t ata_devclass; 76 int ata_dma_check_80pin = 1; 77 78 /* sysctl vars */ 79 static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters"); 80 SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin, 81 CTLFLAG_RWTUN, &ata_dma_check_80pin, 0, 82 "Check for 80pin cable before setting ATA DMA mode"); 83 FEATURE(ata_cam, "ATA 
devices are accessed through the cam(4) driver"); 84 85 /* 86 * newbus device interface related functions 87 */ 88 int 89 ata_probe(device_t dev) 90 { 91 return (BUS_PROBE_LOW_PRIORITY); 92 } 93 94 int 95 ata_attach(device_t dev) 96 { 97 struct ata_channel *ch = device_get_softc(dev); 98 int error, rid; 99 struct cam_devq *devq; 100 const char *res; 101 char buf[64]; 102 int i, mode; 103 104 /* check that we have a virgin channel to attach */ 105 if (ch->r_irq) 106 return EEXIST; 107 108 /* initialize the softc basics */ 109 ch->dev = dev; 110 ch->state = ATA_IDLE; 111 bzero(&ch->state_mtx, sizeof(struct mtx)); 112 mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF); 113 TASK_INIT(&ch->conntask, 0, ata_conn_event, dev); 114 for (i = 0; i < 16; i++) { 115 ch->user[i].revision = 0; 116 snprintf(buf, sizeof(buf), "dev%d.sata_rev", i); 117 if (resource_int_value(device_get_name(dev), 118 device_get_unit(dev), buf, &mode) != 0 && 119 resource_int_value(device_get_name(dev), 120 device_get_unit(dev), "sata_rev", &mode) != 0) 121 mode = -1; 122 if (mode >= 0) 123 ch->user[i].revision = mode; 124 ch->user[i].mode = 0; 125 snprintf(buf, sizeof(buf), "dev%d.mode", i); 126 if (resource_string_value(device_get_name(dev), 127 device_get_unit(dev), buf, &res) == 0) 128 mode = ata_str2mode(res); 129 else if (resource_string_value(device_get_name(dev), 130 device_get_unit(dev), "mode", &res) == 0) 131 mode = ata_str2mode(res); 132 else 133 mode = -1; 134 if (mode >= 0) 135 ch->user[i].mode = mode; 136 if (ch->flags & ATA_SATA) 137 ch->user[i].bytecount = 8192; 138 else 139 ch->user[i].bytecount = MAXPHYS; 140 ch->user[i].caps = 0; 141 ch->curr[i] = ch->user[i]; 142 if (ch->flags & ATA_SATA) { 143 if (ch->pm_level > 0) 144 ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ; 145 if (ch->pm_level > 1) 146 ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ; 147 } else { 148 if (!(ch->flags & ATA_NO_48BIT_DMA)) 149 ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48; 150 } 151 } 152 
callout_init(&ch->poll_callout, 1); 153 154 /* allocate DMA resources if DMA HW present*/ 155 if (ch->dma.alloc) 156 ch->dma.alloc(dev); 157 158 /* setup interrupt delivery */ 159 rid = ATA_IRQ_RID; 160 ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, 161 RF_SHAREABLE | RF_ACTIVE); 162 if (!ch->r_irq) { 163 device_printf(dev, "unable to allocate interrupt\n"); 164 return ENXIO; 165 } 166 if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, 167 ata_interrupt, ch, &ch->ih))) { 168 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq); 169 device_printf(dev, "unable to setup interrupt\n"); 170 return error; 171 } 172 173 if (ch->flags & ATA_PERIODIC_POLL) 174 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch); 175 mtx_lock(&ch->state_mtx); 176 /* Create the device queue for our SIM. */ 177 devq = cam_simq_alloc(1); 178 if (devq == NULL) { 179 device_printf(dev, "Unable to allocate simq\n"); 180 error = ENOMEM; 181 goto err1; 182 } 183 /* Construct SIM entry */ 184 ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch, 185 device_get_unit(dev), &ch->state_mtx, 1, 0, devq); 186 if (ch->sim == NULL) { 187 device_printf(dev, "unable to allocate sim\n"); 188 cam_simq_free(devq); 189 error = ENOMEM; 190 goto err1; 191 } 192 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { 193 device_printf(dev, "unable to register xpt bus\n"); 194 error = ENXIO; 195 goto err2; 196 } 197 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), 198 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 199 device_printf(dev, "unable to create path\n"); 200 error = ENXIO; 201 goto err3; 202 } 203 mtx_unlock(&ch->state_mtx); 204 return (0); 205 206 err3: 207 xpt_bus_deregister(cam_sim_path(ch->sim)); 208 err2: 209 cam_sim_free(ch->sim, /*free_devq*/TRUE); 210 ch->sim = NULL; 211 err1: 212 bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq); 213 mtx_unlock(&ch->state_mtx); 214 if (ch->flags & ATA_PERIODIC_POLL) 215 
callout_drain(&ch->poll_callout); 216 return (error); 217 } 218 219 int 220 ata_detach(device_t dev) 221 { 222 struct ata_channel *ch = device_get_softc(dev); 223 224 /* check that we have a valid channel to detach */ 225 if (!ch->r_irq) 226 return ENXIO; 227 228 /* grap the channel lock so no new requests gets launched */ 229 mtx_lock(&ch->state_mtx); 230 ch->state |= ATA_STALL_QUEUE; 231 mtx_unlock(&ch->state_mtx); 232 if (ch->flags & ATA_PERIODIC_POLL) 233 callout_drain(&ch->poll_callout); 234 235 taskqueue_drain(taskqueue_thread, &ch->conntask); 236 237 mtx_lock(&ch->state_mtx); 238 xpt_async(AC_LOST_DEVICE, ch->path, NULL); 239 xpt_free_path(ch->path); 240 xpt_bus_deregister(cam_sim_path(ch->sim)); 241 cam_sim_free(ch->sim, /*free_devq*/TRUE); 242 ch->sim = NULL; 243 mtx_unlock(&ch->state_mtx); 244 245 /* release resources */ 246 bus_teardown_intr(dev, ch->r_irq, ch->ih); 247 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 248 ch->r_irq = NULL; 249 250 /* free DMA resources if DMA HW present*/ 251 if (ch->dma.free) 252 ch->dma.free(dev); 253 254 mtx_destroy(&ch->state_mtx); 255 return 0; 256 } 257 258 static void 259 ata_conn_event(void *context, int dummy) 260 { 261 device_t dev = (device_t)context; 262 struct ata_channel *ch = device_get_softc(dev); 263 union ccb *ccb; 264 265 mtx_lock(&ch->state_mtx); 266 if (ch->sim == NULL) { 267 mtx_unlock(&ch->state_mtx); 268 return; 269 } 270 ata_reinit(dev); 271 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 272 return; 273 if (xpt_create_path(&ccb->ccb_h.path, NULL, 274 cam_sim_path(ch->sim), 275 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 276 xpt_free_ccb(ccb); 277 return; 278 } 279 xpt_rescan(ccb); 280 mtx_unlock(&ch->state_mtx); 281 } 282 283 int 284 ata_reinit(device_t dev) 285 { 286 struct ata_channel *ch = device_get_softc(dev); 287 struct ata_request *request; 288 289 xpt_freeze_simq(ch->sim, 1); 290 if ((request = ch->running)) { 291 ch->running = NULL; 292 if (ch->state == 
ATA_ACTIVE) 293 ch->state = ATA_IDLE; 294 callout_stop(&request->callout); 295 if (ch->dma.unload) 296 ch->dma.unload(request); 297 request->result = ERESTART; 298 ata_cam_end_transaction(dev, request); 299 } 300 /* reset the controller HW, the channel and device(s) */ 301 ATA_RESET(dev); 302 /* Tell the XPT about the event */ 303 xpt_async(AC_BUS_RESET, ch->path, NULL); 304 xpt_release_simq(ch->sim, TRUE); 305 return(0); 306 } 307 308 int 309 ata_suspend(device_t dev) 310 { 311 struct ata_channel *ch; 312 313 /* check for valid device */ 314 if (!dev || !(ch = device_get_softc(dev))) 315 return ENXIO; 316 317 if (ch->flags & ATA_PERIODIC_POLL) 318 callout_drain(&ch->poll_callout); 319 mtx_lock(&ch->state_mtx); 320 xpt_freeze_simq(ch->sim, 1); 321 while (ch->state != ATA_IDLE) 322 msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100); 323 mtx_unlock(&ch->state_mtx); 324 return(0); 325 } 326 327 int 328 ata_resume(device_t dev) 329 { 330 struct ata_channel *ch; 331 int error; 332 333 /* check for valid device */ 334 if (!dev || !(ch = device_get_softc(dev))) 335 return ENXIO; 336 337 mtx_lock(&ch->state_mtx); 338 error = ata_reinit(dev); 339 xpt_release_simq(ch->sim, TRUE); 340 mtx_unlock(&ch->state_mtx); 341 if (ch->flags & ATA_PERIODIC_POLL) 342 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch); 343 return error; 344 } 345 346 void 347 ata_interrupt(void *data) 348 { 349 struct ata_channel *ch = (struct ata_channel *)data; 350 351 mtx_lock(&ch->state_mtx); 352 ata_interrupt_locked(data); 353 mtx_unlock(&ch->state_mtx); 354 } 355 356 static void 357 ata_interrupt_locked(void *data) 358 { 359 struct ata_channel *ch = (struct ata_channel *)data; 360 struct ata_request *request; 361 362 /* ignore interrupt if its not for us */ 363 if (ch->hw.status && !ch->hw.status(ch->dev)) 364 return; 365 366 /* do we have a running request */ 367 if (!(request = ch->running)) 368 return; 369 370 ATA_DEBUG_RQ(request, "interrupt"); 371 372 /* safetycheck for the right 
state */ 373 if (ch->state == ATA_IDLE) { 374 device_printf(request->dev, "interrupt on idle channel ignored\n"); 375 return; 376 } 377 378 /* 379 * we have the HW locks, so end the transaction for this request 380 * if it finishes immediately otherwise wait for next interrupt 381 */ 382 if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) { 383 ch->running = NULL; 384 if (ch->state == ATA_ACTIVE) 385 ch->state = ATA_IDLE; 386 ata_cam_end_transaction(ch->dev, request); 387 return; 388 } 389 } 390 391 static void 392 ata_periodic_poll(void *data) 393 { 394 struct ata_channel *ch = (struct ata_channel *)data; 395 396 callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch); 397 ata_interrupt(ch); 398 } 399 400 void 401 ata_print_cable(device_t dev, u_int8_t *who) 402 { 403 device_printf(dev, 404 "DMA limited to UDMA33, %s found non-ATA66 cable\n", who); 405 } 406 407 /* 408 * misc support functions 409 */ 410 void 411 ata_default_registers(device_t dev) 412 { 413 struct ata_channel *ch = device_get_softc(dev); 414 415 /* fill in the defaults from whats setup already */ 416 ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res; 417 ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset; 418 ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res; 419 ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset; 420 ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res; 421 ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset; 422 ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res; 423 ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset; 424 } 425 426 void 427 ata_udelay(int interval) 428 { 429 /* for now just use DELAY, the timer/sleep subsytems are not there yet */ 430 if (1 || interval < (1000000/hz) || ata_delayed_attach) 431 DELAY(interval); 432 else 433 pause("ataslp", interval/(1000000/hz)); 434 } 435 436 const char * 437 ata_cmd2str(struct ata_request *request) 438 { 439 static char buffer[20]; 440 441 if (request->flags & ATA_R_ATAPI) { 442 
switch (request->u.atapi.sense.key ? 443 request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) { 444 case 0x00: return ("TEST_UNIT_READY"); 445 case 0x01: return ("REZERO"); 446 case 0x03: return ("REQUEST_SENSE"); 447 case 0x04: return ("FORMAT"); 448 case 0x08: return ("READ"); 449 case 0x0a: return ("WRITE"); 450 case 0x10: return ("WEOF"); 451 case 0x11: return ("SPACE"); 452 case 0x12: return ("INQUIRY"); 453 case 0x15: return ("MODE_SELECT"); 454 case 0x19: return ("ERASE"); 455 case 0x1a: return ("MODE_SENSE"); 456 case 0x1b: return ("START_STOP"); 457 case 0x1e: return ("PREVENT_ALLOW"); 458 case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES"); 459 case 0x25: return ("READ_CAPACITY"); 460 case 0x28: return ("READ_BIG"); 461 case 0x2a: return ("WRITE_BIG"); 462 case 0x2b: return ("LOCATE"); 463 case 0x34: return ("READ_POSITION"); 464 case 0x35: return ("SYNCHRONIZE_CACHE"); 465 case 0x3b: return ("WRITE_BUFFER"); 466 case 0x3c: return ("READ_BUFFER"); 467 case 0x42: return ("READ_SUBCHANNEL"); 468 case 0x43: return ("READ_TOC"); 469 case 0x45: return ("PLAY_10"); 470 case 0x47: return ("PLAY_MSF"); 471 case 0x48: return ("PLAY_TRACK"); 472 case 0x4b: return ("PAUSE"); 473 case 0x51: return ("READ_DISK_INFO"); 474 case 0x52: return ("READ_TRACK_INFO"); 475 case 0x53: return ("RESERVE_TRACK"); 476 case 0x54: return ("SEND_OPC_INFO"); 477 case 0x55: return ("MODE_SELECT_BIG"); 478 case 0x58: return ("REPAIR_TRACK"); 479 case 0x59: return ("READ_MASTER_CUE"); 480 case 0x5a: return ("MODE_SENSE_BIG"); 481 case 0x5b: return ("CLOSE_TRACK/SESSION"); 482 case 0x5c: return ("READ_BUFFER_CAPACITY"); 483 case 0x5d: return ("SEND_CUE_SHEET"); 484 case 0x96: return ("SERVICE_ACTION_IN"); 485 case 0xa1: return ("BLANK_CMD"); 486 case 0xa3: return ("SEND_KEY"); 487 case 0xa4: return ("REPORT_KEY"); 488 case 0xa5: return ("PLAY_12"); 489 case 0xa6: return ("LOAD_UNLOAD"); 490 case 0xad: return ("READ_DVD_STRUCTURE"); 491 case 0xb4: return ("PLAY_CD"); 492 case 0xbb: return 
("SET_SPEED"); 493 case 0xbd: return ("MECH_STATUS"); 494 case 0xbe: return ("READ_CD"); 495 case 0xff: return ("POLL_DSC"); 496 } 497 } else { 498 switch (request->u.ata.command) { 499 case 0x00: return ("NOP"); 500 case 0x08: return ("DEVICE_RESET"); 501 case 0x20: return ("READ"); 502 case 0x24: return ("READ48"); 503 case 0x25: return ("READ_DMA48"); 504 case 0x26: return ("READ_DMA_QUEUED48"); 505 case 0x27: return ("READ_NATIVE_MAX_ADDRESS48"); 506 case 0x29: return ("READ_MUL48"); 507 case 0x30: return ("WRITE"); 508 case 0x34: return ("WRITE48"); 509 case 0x35: return ("WRITE_DMA48"); 510 case 0x36: return ("WRITE_DMA_QUEUED48"); 511 case 0x37: return ("SET_MAX_ADDRESS48"); 512 case 0x39: return ("WRITE_MUL48"); 513 case 0x70: return ("SEEK"); 514 case 0xa0: return ("PACKET_CMD"); 515 case 0xa1: return ("ATAPI_IDENTIFY"); 516 case 0xa2: return ("SERVICE"); 517 case 0xb0: return ("SMART"); 518 case 0xc0: return ("CFA ERASE"); 519 case 0xc4: return ("READ_MUL"); 520 case 0xc5: return ("WRITE_MUL"); 521 case 0xc6: return ("SET_MULTI"); 522 case 0xc7: return ("READ_DMA_QUEUED"); 523 case 0xc8: return ("READ_DMA"); 524 case 0xca: return ("WRITE_DMA"); 525 case 0xcc: return ("WRITE_DMA_QUEUED"); 526 case 0xe6: return ("SLEEP"); 527 case 0xe7: return ("FLUSHCACHE"); 528 case 0xea: return ("FLUSHCACHE48"); 529 case 0xec: return ("ATA_IDENTIFY"); 530 case 0xef: 531 switch (request->u.ata.feature) { 532 case 0x03: return ("SETFEATURES SET TRANSFER MODE"); 533 case 0x02: return ("SETFEATURES ENABLE WCACHE"); 534 case 0x82: return ("SETFEATURES DISABLE WCACHE"); 535 case 0xaa: return ("SETFEATURES ENABLE RCACHE"); 536 case 0x55: return ("SETFEATURES DISABLE RCACHE"); 537 } 538 sprintf(buffer, "SETFEATURES 0x%02x", 539 request->u.ata.feature); 540 return (buffer); 541 case 0xf5: return ("SECURITY_FREE_LOCK"); 542 case 0xf8: return ("READ_NATIVE_MAX_ADDRESS"); 543 case 0xf9: return ("SET_MAX_ADDRESS"); 544 } 545 } 546 sprintf(buffer, "unknown CMD (0x%02x)", 
request->u.ata.command); 547 return (buffer); 548 } 549 550 const char * 551 ata_mode2str(int mode) 552 { 553 switch (mode) { 554 case -1: return "UNSUPPORTED"; 555 case ATA_PIO0: return "PIO0"; 556 case ATA_PIO1: return "PIO1"; 557 case ATA_PIO2: return "PIO2"; 558 case ATA_PIO3: return "PIO3"; 559 case ATA_PIO4: return "PIO4"; 560 case ATA_WDMA0: return "WDMA0"; 561 case ATA_WDMA1: return "WDMA1"; 562 case ATA_WDMA2: return "WDMA2"; 563 case ATA_UDMA0: return "UDMA16"; 564 case ATA_UDMA1: return "UDMA25"; 565 case ATA_UDMA2: return "UDMA33"; 566 case ATA_UDMA3: return "UDMA40"; 567 case ATA_UDMA4: return "UDMA66"; 568 case ATA_UDMA5: return "UDMA100"; 569 case ATA_UDMA6: return "UDMA133"; 570 case ATA_SA150: return "SATA150"; 571 case ATA_SA300: return "SATA300"; 572 case ATA_SA600: return "SATA600"; 573 default: 574 if (mode & ATA_DMA_MASK) 575 return "BIOSDMA"; 576 else 577 return "BIOSPIO"; 578 } 579 } 580 581 static int 582 ata_str2mode(const char *str) 583 { 584 585 if (!strcasecmp(str, "PIO0")) return (ATA_PIO0); 586 if (!strcasecmp(str, "PIO1")) return (ATA_PIO1); 587 if (!strcasecmp(str, "PIO2")) return (ATA_PIO2); 588 if (!strcasecmp(str, "PIO3")) return (ATA_PIO3); 589 if (!strcasecmp(str, "PIO4")) return (ATA_PIO4); 590 if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0); 591 if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1); 592 if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2); 593 if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0); 594 if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0); 595 if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1); 596 if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1); 597 if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2); 598 if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2); 599 if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3); 600 if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3); 601 if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4); 602 if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4); 603 if (!strcasecmp(str, 
"UDMA5")) return (ATA_UDMA5); 604 if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5); 605 if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6); 606 if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6); 607 return (-1); 608 } 609 610 int 611 ata_atapi(device_t dev, int target) 612 { 613 struct ata_channel *ch = device_get_softc(dev); 614 615 return (ch->devices & (ATA_ATAPI_MASTER << target)); 616 } 617 618 void 619 ata_timeout(struct ata_request *request) 620 { 621 struct ata_channel *ch; 622 623 ch = device_get_softc(request->parent); 624 //request->flags |= ATA_R_DEBUG; 625 ATA_DEBUG_RQ(request, "timeout"); 626 627 /* 628 * If we have an ATA_ACTIVE request running, we flag the request 629 * ATA_R_TIMEOUT so ata_cam_end_transaction() will handle it correctly. 630 * Also, NULL out the running request so we wont loose the race with 631 * an eventual interrupt arriving late. 632 */ 633 if (ch->state == ATA_ACTIVE) { 634 request->flags |= ATA_R_TIMEOUT; 635 if (ch->dma.unload) 636 ch->dma.unload(request); 637 ch->running = NULL; 638 ch->state = ATA_IDLE; 639 ata_cam_end_transaction(ch->dev, request); 640 } 641 mtx_unlock(&ch->state_mtx); 642 } 643 644 static void 645 ata_cam_begin_transaction(device_t dev, union ccb *ccb) 646 { 647 struct ata_channel *ch = device_get_softc(dev); 648 struct ata_request *request; 649 650 request = &ch->request; 651 bzero(request, sizeof(*request)); 652 653 /* setup request */ 654 request->dev = NULL; 655 request->parent = dev; 656 request->unit = ccb->ccb_h.target_id; 657 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 658 request->data = ccb->ataio.data_ptr; 659 request->bytecount = ccb->ataio.dxfer_len; 660 request->u.ata.command = ccb->ataio.cmd.command; 661 request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) | 662 (uint16_t)ccb->ataio.cmd.features; 663 request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) | 664 (uint16_t)ccb->ataio.cmd.sector_count; 665 if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) { 666 
request->flags |= ATA_R_48BIT; 667 request->u.ata.lba = 668 ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) | 669 ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) | 670 ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24); 671 } else { 672 request->u.ata.lba = 673 ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24); 674 } 675 request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) | 676 ((uint64_t)ccb->ataio.cmd.lba_mid << 8) | 677 (uint64_t)ccb->ataio.cmd.lba_low; 678 if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT) 679 request->flags |= ATA_R_NEEDRESULT; 680 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 681 ccb->ataio.cmd.flags & CAM_ATAIO_DMA) 682 request->flags |= ATA_R_DMA; 683 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 684 request->flags |= ATA_R_READ; 685 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) 686 request->flags |= ATA_R_WRITE; 687 if (ccb->ataio.cmd.command == ATA_READ_MUL || 688 ccb->ataio.cmd.command == ATA_READ_MUL48 || 689 ccb->ataio.cmd.command == ATA_WRITE_MUL || 690 ccb->ataio.cmd.command == ATA_WRITE_MUL48) { 691 request->transfersize = min(request->bytecount, 692 ch->curr[ccb->ccb_h.target_id].bytecount); 693 } else 694 request->transfersize = min(request->bytecount, 512); 695 } else { 696 request->data = ccb->csio.data_ptr; 697 request->bytecount = ccb->csio.dxfer_len; 698 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
699 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, 700 request->u.atapi.ccb, ccb->csio.cdb_len); 701 request->flags |= ATA_R_ATAPI; 702 if (ch->curr[ccb->ccb_h.target_id].atapi == 16) 703 request->flags |= ATA_R_ATAPI16; 704 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 705 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) 706 request->flags |= ATA_R_DMA; 707 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) 708 request->flags |= ATA_R_READ; 709 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) 710 request->flags |= ATA_R_WRITE; 711 request->transfersize = min(request->bytecount, 712 ch->curr[ccb->ccb_h.target_id].bytecount); 713 } 714 request->retries = 0; 715 request->timeout = (ccb->ccb_h.timeout + 999) / 1000; 716 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); 717 request->ccb = ccb; 718 request->flags |= ATA_R_DATA_IN_CCB; 719 720 ch->running = request; 721 ch->state = ATA_ACTIVE; 722 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { 723 ch->running = NULL; 724 ch->state = ATA_IDLE; 725 ata_cam_end_transaction(dev, request); 726 return; 727 } 728 } 729 730 static void 731 ata_cam_request_sense(device_t dev, struct ata_request *request) 732 { 733 struct ata_channel *ch = device_get_softc(dev); 734 union ccb *ccb = request->ccb; 735 736 ch->requestsense = 1; 737 738 bzero(request, sizeof(*request)); 739 request->dev = NULL; 740 request->parent = dev; 741 request->unit = ccb->ccb_h.target_id; 742 request->data = (void *)&ccb->csio.sense_data; 743 request->bytecount = ccb->csio.sense_len; 744 request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE; 745 request->u.atapi.ccb[4] = ccb->csio.sense_len; 746 request->flags |= ATA_R_ATAPI; 747 if (ch->curr[ccb->ccb_h.target_id].atapi == 16) 748 request->flags |= ATA_R_ATAPI16; 749 if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) 750 request->flags |= ATA_R_DMA; 751 request->flags |= ATA_R_READ; 752 request->transfersize = min(request->bytecount, 753 
ch->curr[ccb->ccb_h.target_id].bytecount); 754 request->retries = 0; 755 request->timeout = (ccb->ccb_h.timeout + 999) / 1000; 756 callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED); 757 request->ccb = ccb; 758 759 ch->running = request; 760 ch->state = ATA_ACTIVE; 761 if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) { 762 ch->running = NULL; 763 ch->state = ATA_IDLE; 764 ata_cam_end_transaction(dev, request); 765 return; 766 } 767 } 768 769 static void 770 ata_cam_process_sense(device_t dev, struct ata_request *request) 771 { 772 struct ata_channel *ch = device_get_softc(dev); 773 union ccb *ccb = request->ccb; 774 int fatalerr = 0; 775 776 ch->requestsense = 0; 777 778 if (request->flags & ATA_R_TIMEOUT) 779 fatalerr = 1; 780 if ((request->flags & ATA_R_TIMEOUT) == 0 && 781 (request->status & ATA_S_ERROR) == 0 && 782 request->result == 0) { 783 ccb->ccb_h.status |= CAM_AUTOSNS_VALID; 784 } else { 785 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 786 ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL; 787 } 788 789 xpt_done(ccb); 790 /* Do error recovery if needed. 
*/ 791 if (fatalerr) 792 ata_reinit(dev); 793 } 794 795 static void 796 ata_cam_end_transaction(device_t dev, struct ata_request *request) 797 { 798 struct ata_channel *ch = device_get_softc(dev); 799 union ccb *ccb = request->ccb; 800 int fatalerr = 0; 801 802 if (ch->requestsense) { 803 ata_cam_process_sense(dev, request); 804 return; 805 } 806 807 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 808 if (request->flags & ATA_R_TIMEOUT) { 809 xpt_freeze_simq(ch->sim, 1); 810 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 811 ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ; 812 fatalerr = 1; 813 } else if (request->status & ATA_S_ERROR) { 814 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 815 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; 816 } else { 817 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 818 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 819 } 820 } else if (request->result == ERESTART) 821 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 822 else if (request->result != 0) 823 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 824 else 825 ccb->ccb_h.status |= CAM_REQ_CMP; 826 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP && 827 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 828 xpt_freeze_devq(ccb->ccb_h.path, 1); 829 ccb->ccb_h.status |= CAM_DEV_QFRZN; 830 } 831 if (ccb->ccb_h.func_code == XPT_ATA_IO && 832 ((request->status & ATA_S_ERROR) || 833 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) { 834 struct ata_res *res = &ccb->ataio.res; 835 res->status = request->status; 836 res->error = request->error; 837 res->lba_low = request->u.ata.lba; 838 res->lba_mid = request->u.ata.lba >> 8; 839 res->lba_high = request->u.ata.lba >> 16; 840 res->device = request->u.ata.lba >> 24; 841 res->lba_low_exp = request->u.ata.lba >> 24; 842 res->lba_mid_exp = request->u.ata.lba >> 32; 843 res->lba_high_exp = request->u.ata.lba >> 40; 844 res->sector_count = request->u.ata.count; 845 res->sector_count_exp = request->u.ata.count >> 8; 846 } 847 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 848 
if (ccb->ccb_h.func_code == XPT_ATA_IO) { 849 ccb->ataio.resid = 850 ccb->ataio.dxfer_len - request->donecount; 851 } else { 852 ccb->csio.resid = 853 ccb->csio.dxfer_len - request->donecount; 854 } 855 } 856 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && 857 (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0) 858 ata_cam_request_sense(dev, request); 859 else 860 xpt_done(ccb); 861 /* Do error recovery if needed. */ 862 if (fatalerr) 863 ata_reinit(dev); 864 } 865 866 static int 867 ata_check_ids(device_t dev, union ccb *ccb) 868 { 869 struct ata_channel *ch = device_get_softc(dev); 870 871 if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) { 872 ccb->ccb_h.status = CAM_TID_INVALID; 873 xpt_done(ccb); 874 return (-1); 875 } 876 if (ccb->ccb_h.target_lun != 0) { 877 ccb->ccb_h.status = CAM_LUN_INVALID; 878 xpt_done(ccb); 879 return (-1); 880 } 881 return (0); 882 } 883 884 static void 885 ataaction(struct cam_sim *sim, union ccb *ccb) 886 { 887 device_t dev, parent; 888 struct ata_channel *ch; 889 890 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n", 891 ccb->ccb_h.func_code)); 892 893 ch = (struct ata_channel *)cam_sim_softc(sim); 894 dev = ch->dev; 895 switch (ccb->ccb_h.func_code) { 896 /* Common cases first */ 897 case XPT_ATA_IO: /* Execute the requested I/O operation */ 898 case XPT_SCSI_IO: 899 if (ata_check_ids(dev, ccb)) 900 return; 901 if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) 902 << ccb->ccb_h.target_id)) == 0) { 903 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 904 break; 905 } 906 if (ch->running) 907 device_printf(dev, "already running!\n"); 908 if (ccb->ccb_h.func_code == XPT_ATA_IO && 909 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 910 (ccb->ataio.cmd.control & ATA_A_RESET)) { 911 struct ata_res *res = &ccb->ataio.res; 912 913 bzero(res, sizeof(*res)); 914 if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) { 915 res->lba_high = 0; 916 res->lba_mid = 0; 917 } else { 918 res->lba_high 
= 0xeb; 919 res->lba_mid = 0x14; 920 } 921 ccb->ccb_h.status = CAM_REQ_CMP; 922 break; 923 } 924 ata_cam_begin_transaction(dev, ccb); 925 return; 926 case XPT_EN_LUN: /* Enable LUN as a target */ 927 case XPT_TARGET_IO: /* Execute target I/O request */ 928 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 929 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ 930 case XPT_ABORT: /* Abort the specified CCB */ 931 /* XXX Implement */ 932 ccb->ccb_h.status = CAM_REQ_INVALID; 933 break; 934 case XPT_SET_TRAN_SETTINGS: 935 { 936 struct ccb_trans_settings *cts = &ccb->cts; 937 struct ata_cam_device *d; 938 939 if (ata_check_ids(dev, ccb)) 940 return; 941 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 942 d = &ch->curr[ccb->ccb_h.target_id]; 943 else 944 d = &ch->user[ccb->ccb_h.target_id]; 945 if (ch->flags & ATA_SATA) { 946 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) 947 d->revision = cts->xport_specific.sata.revision; 948 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) { 949 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 950 d->mode = ATA_SETMODE(ch->dev, 951 ccb->ccb_h.target_id, 952 cts->xport_specific.sata.mode); 953 } else 954 d->mode = cts->xport_specific.sata.mode; 955 } 956 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) 957 d->bytecount = min(8192, cts->xport_specific.sata.bytecount); 958 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) 959 d->atapi = cts->xport_specific.sata.atapi; 960 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) 961 d->caps = cts->xport_specific.sata.caps; 962 } else { 963 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) { 964 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 965 d->mode = ATA_SETMODE(ch->dev, 966 ccb->ccb_h.target_id, 967 cts->xport_specific.ata.mode); 968 } else 969 d->mode = cts->xport_specific.ata.mode; 970 } 971 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT) 972 d->bytecount = cts->xport_specific.ata.bytecount; 973 if 
(cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI) 974 d->atapi = cts->xport_specific.ata.atapi; 975 if (cts->xport_specific.ata.valid & CTS_ATA_VALID_CAPS) 976 d->caps = cts->xport_specific.ata.caps; 977 } 978 ccb->ccb_h.status = CAM_REQ_CMP; 979 break; 980 } 981 case XPT_GET_TRAN_SETTINGS: 982 { 983 struct ccb_trans_settings *cts = &ccb->cts; 984 struct ata_cam_device *d; 985 986 if (ata_check_ids(dev, ccb)) 987 return; 988 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 989 d = &ch->curr[ccb->ccb_h.target_id]; 990 else 991 d = &ch->user[ccb->ccb_h.target_id]; 992 cts->protocol = PROTO_UNSPECIFIED; 993 cts->protocol_version = PROTO_VERSION_UNSPECIFIED; 994 if (ch->flags & ATA_SATA) { 995 cts->transport = XPORT_SATA; 996 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 997 cts->xport_specific.sata.valid = 0; 998 cts->xport_specific.sata.mode = d->mode; 999 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; 1000 cts->xport_specific.sata.bytecount = d->bytecount; 1001 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; 1002 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 1003 cts->xport_specific.sata.revision = 1004 ATA_GETREV(dev, ccb->ccb_h.target_id); 1005 if (cts->xport_specific.sata.revision != 0xff) { 1006 cts->xport_specific.sata.valid |= 1007 CTS_SATA_VALID_REVISION; 1008 } 1009 cts->xport_specific.sata.caps = 1010 d->caps & CTS_SATA_CAPS_D; 1011 if (ch->pm_level) { 1012 cts->xport_specific.sata.caps |= 1013 CTS_SATA_CAPS_H_PMREQ; 1014 } 1015 cts->xport_specific.sata.caps &= 1016 ch->user[ccb->ccb_h.target_id].caps; 1017 } else { 1018 cts->xport_specific.sata.revision = d->revision; 1019 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; 1020 cts->xport_specific.sata.caps = d->caps; 1021 } 1022 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 1023 cts->xport_specific.sata.atapi = d->atapi; 1024 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; 1025 } else { 1026 cts->transport = XPORT_ATA; 1027 cts->transport_version = 
XPORT_VERSION_UNSPECIFIED; 1028 cts->xport_specific.ata.valid = 0; 1029 cts->xport_specific.ata.mode = d->mode; 1030 cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE; 1031 cts->xport_specific.ata.bytecount = d->bytecount; 1032 cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT; 1033 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) { 1034 cts->xport_specific.ata.caps = 1035 d->caps & CTS_ATA_CAPS_D; 1036 if (!(ch->flags & ATA_NO_48BIT_DMA)) 1037 cts->xport_specific.ata.caps |= 1038 CTS_ATA_CAPS_H_DMA48; 1039 cts->xport_specific.ata.caps &= 1040 ch->user[ccb->ccb_h.target_id].caps; 1041 } else 1042 cts->xport_specific.ata.caps = d->caps; 1043 cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS; 1044 cts->xport_specific.ata.atapi = d->atapi; 1045 cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI; 1046 } 1047 ccb->ccb_h.status = CAM_REQ_CMP; 1048 break; 1049 } 1050 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 1051 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 1052 ata_reinit(dev); 1053 ccb->ccb_h.status = CAM_REQ_CMP; 1054 break; 1055 case XPT_TERM_IO: /* Terminate the I/O process */ 1056 /* XXX Implement */ 1057 ccb->ccb_h.status = CAM_REQ_INVALID; 1058 break; 1059 case XPT_PATH_INQ: /* Path routing inquiry */ 1060 { 1061 struct ccb_pathinq *cpi = &ccb->cpi; 1062 1063 parent = device_get_parent(dev); 1064 cpi->version_num = 1; /* XXX??? 
 */
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;		/* no target-mode support */
		cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
		cpi->hba_eng_cnt = 0;
		/* Master only when slave devices are disabled, else master+slave. */
		if (ch->flags & ATA_NO_SLAVE)
			cpi->max_target = 0;
		else
			cpi->max_target = 1;
		cpi->max_lun = 0;		/* ATA devices expose a single LUN */
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		/* Nominal base transfer speed: SATA gen1 vs. legacy ATA default. */
		if (ch->flags & ATA_SATA)
			cpi->base_transfer_speed = 150000;
		else
			cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		if (ch->flags & ATA_SATA)
			cpi->transport = XPORT_SATA;
		else
			cpi->transport = XPORT_ATA;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->protocol = PROTO_ATA;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		/* Largest I/O we accept: the DMA engine's limit, or DFLTPHYS. */
		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
		/* Fill in PCI IDs only when the grandparent bus really is PCI. */
		if (device_get_devclass(device_get_parent(parent)) ==
		    devclass_find("pci")) {
			cpi->hba_vendor = pci_get_vendor(parent);
			cpi->hba_device = pci_get_device(parent);
			cpi->hba_subvendor = pci_get_subvendor(parent);
			cpi->hba_subdevice = pci_get_subdevice(parent);
		}
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	default:
		/* Unhandled CCB function codes are rejected, not dropped. */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

/*
 * Polled-mode SIM entry point: run the channel interrupt handler
 * directly.  (The _locked name suggests the caller already holds the
 * SIM/channel lock — NOTE(review): confirm against ata_interrupt_locked.)
 */
static void
atapoll(struct cam_sim *sim)
{
	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);

	ata_interrupt_locked(ch);
}

/*
 * module handling
 */
/*
 * Kernel module event handler: this driver needs no per-module setup or
 * teardown, so load and unload always succeed; every other event is
 * reported as unsupported.
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return 0;

	case MOD_UNLOAD:
		return 0;

	default:
		return EOPNOTSUPP;
	}
}

/* Module descriptor: name, event handler, no private argument. */
static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
/*
 * Register the "ata" kernel module (initialized at the SI_SUB_CONFIGURE
 * stage, second in order), publish its interface version, and declare
 * its dependency on the cam module (min/preferred/max version 1).
 */
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(ata, 1);
MODULE_DEPEND(ata, cam, 1, 1, 1);