/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/ctype.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>
#include <dev/pci/pcivar.h>
#include <ata_if.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

/* prototypes */
static void ataaction(struct cam_sim *sim, union ccb *ccb);
static void atapoll(struct cam_sim *sim);
static void ata_cam_begin_transaction(device_t dev, union ccb *ccb);
static void ata_cam_end_transaction(device_t dev, struct ata_request *request);
static void ata_cam_request_sense(device_t dev, struct ata_request *request);
static int ata_check_ids(device_t dev, union ccb *ccb);
static void ata_conn_event(void *context, int dummy);
static void ata_init(void);
static void ata_interrupt_locked(void *data);
static int ata_module_event_handler(module_t mod, int what, void *arg);
static void ata_periodic_poll(void *data);
static int ata_str2mode(const char *str);
static void ata_uninit(void);

/* global vars */
MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
devclass_t ata_devclass;
uma_zone_t ata_request_zone;
int ata_dma_check_80pin = 1;

/* sysctl vars */
static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
TUNABLE_INT("hw.ata.ata_dma_check_80pin", &ata_dma_check_80pin);
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
    CTLFLAG_RW, &ata_dma_check_80pin, 1,
    "Check for 80pin cable before setting ATA DMA mode");
FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");

/*
 * newbus device interface related functions
 */
int
ata_probe(device_t dev)
{
	return 0;
}

int
ata_attach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);
	int error, rid;
	struct cam_devq *devq;
	const char *res;
	char buf[64];
	int i, mode;

	/* check that we have a virgin channel to attach */
	if (ch->r_irq)
		return EEXIST;

	/* initialize the softc basics */
	ch->dev = dev;
	ch->state = ATA_IDLE;
	bzero(&ch->state_mtx, sizeof(struct mtx));
	mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
	TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
	for (i = 0; i < 16; i++) {
		ch->user[i].revision = 0;
		snprintf(buf, sizeof(buf), "dev%d.sata_rev", i);
		if (resource_int_value(device_get_name(dev),
		    device_get_unit(dev), buf, &mode) != 0 &&
		    resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "sata_rev", &mode) != 0)
			mode = -1;
		if (mode >= 0)
			ch->user[i].revision = mode;
		ch->user[i].mode = 0;
		snprintf(buf, sizeof(buf), "dev%d.mode", i);
		if (resource_string_value(device_get_name(dev),
		    device_get_unit(dev), buf, &res) == 0)
			mode = ata_str2mode(res);
		else if (resource_string_value(device_get_name(dev),
		    device_get_unit(dev), "mode", &res) == 0)
			mode = ata_str2mode(res);
		else
			mode = -1;
		if (mode >= 0)
			ch->user[i].mode = mode;
		if (ch->flags & ATA_SATA)
			ch->user[i].bytecount = 8192;
		else
			ch->user[i].bytecount = MAXPHYS;
		ch->user[i].caps = 0;
		ch->curr[i] = ch->user[i];
		if (ch->flags & ATA_SATA) {
			if (ch->pm_level > 0)
				ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
			if (ch->pm_level > 1)
				ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
		} else {
			if (!(ch->flags & ATA_NO_48BIT_DMA))
				ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48;
		}
	}
	callout_init(&ch->poll_callout, 1);

	/* allocate DMA resources if DMA HW present */
	if (ch->dma.alloc)
		ch->dma.alloc(dev);

	/* setup interrupt delivery */
	rid = ATA_IRQ_RID;
	ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (!ch->r_irq) {
		device_printf(dev, "unable to allocate interrupt\n");
		return ENXIO;
	}
	if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
	    ata_interrupt, ch, &ch->ih))) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
		device_printf(dev, "unable to setup interrupt\n");
		return error;
	}

	if (ch->flags & ATA_PERIODIC_POLL)
		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
	mtx_lock(&ch->state_mtx);
	/* Create the device queue for our SIM. */
	devq = cam_simq_alloc(1);
	if (devq == NULL) {
		device_printf(dev, "Unable to allocate simq\n");
		error = ENOMEM;
		goto err1;
	}
	/* Construct SIM entry */
	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
	if (ch->sim == NULL) {
		device_printf(dev, "unable to allocate sim\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto err1;
	}
	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
		device_printf(dev, "unable to register xpt bus\n");
		error = ENXIO;
		goto err2;
	}
	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "unable to create path\n");
		error = ENXIO;
		goto err3;
	}
	mtx_unlock(&ch->state_mtx);
	return (0);

err3:
	xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
	ch->sim = NULL;
err1:
	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
	mtx_unlock(&ch->state_mtx);
	if (ch->flags & ATA_PERIODIC_POLL)
		callout_drain(&ch->poll_callout);
	return (error);
}

int
ata_detach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);

	/* check that we have a valid channel to detach */
	if (!ch->r_irq)
		return ENXIO;

	/* grab the channel lock so no new requests get launched */
	mtx_lock(&ch->state_mtx);
	ch->state |= ATA_STALL_QUEUE;
	mtx_unlock(&ch->state_mtx);
	if (ch->flags & ATA_PERIODIC_POLL)
		callout_drain(&ch->poll_callout);

	taskqueue_drain(taskqueue_thread, &ch->conntask);

	mtx_lock(&ch->state_mtx);
	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
	xpt_free_path(ch->path);
	xpt_bus_deregister(cam_sim_path(ch->sim));
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
	ch->sim = NULL;
	mtx_unlock(&ch->state_mtx);

	/* release resources */
	bus_teardown_intr(dev, ch->r_irq, ch->ih);
	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
	ch->r_irq = NULL;

	/* free DMA resources if DMA HW present */
	if (ch->dma.free)
		ch->dma.free(dev);

	mtx_destroy(&ch->state_mtx);
	return 0;
}

static void
ata_conn_event(void *context, int dummy)
{
	device_t dev = (device_t)context;
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb;

	mtx_lock(&ch->state_mtx);
	if (ch->sim == NULL) {
		mtx_unlock(&ch->state_mtx);
		return;
	}
	ata_reinit(dev);
	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
		return;
	if (xpt_create_path(&ccb->ccb_h.path, NULL,
	    cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}
	xpt_rescan(ccb);
	mtx_unlock(&ch->state_mtx);
}

int
ata_reinit(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);
	struct ata_request *request;

	xpt_freeze_simq(ch->sim, 1);
	if ((request = ch->running)) {
		ch->running = NULL;
		if (ch->state == ATA_ACTIVE)
			ch->state = ATA_IDLE;
		callout_stop(&request->callout);
		if (ch->dma.unload)
			ch->dma.unload(request);
		request->result = ERESTART;
		ata_cam_end_transaction(dev, request);
	}
	/* reset the controller HW, the channel and device(s) */
	ATA_RESET(dev);
	/* Tell the XPT about the event */
	xpt_async(AC_BUS_RESET, ch->path, NULL);
	xpt_release_simq(ch->sim, TRUE);
	return (0);
}

int
ata_suspend(device_t dev)
{
	struct ata_channel *ch;

	/* check for valid device */
	if (!dev || !(ch = device_get_softc(dev)))
		return ENXIO;

	if (ch->flags & ATA_PERIODIC_POLL)
		callout_drain(&ch->poll_callout);
	mtx_lock(&ch->state_mtx);
	xpt_freeze_simq(ch->sim, 1);
	while (ch->state != ATA_IDLE)
		msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
	mtx_unlock(&ch->state_mtx);
	return (0);
}

int
ata_resume(device_t dev)
{
	struct ata_channel *ch;
	int error;

	/* check for valid device */
	if (!dev || !(ch = device_get_softc(dev)))
		return ENXIO;

	mtx_lock(&ch->state_mtx);
	error = ata_reinit(dev);
	xpt_release_simq(ch->sim, TRUE);
	mtx_unlock(&ch->state_mtx);
	if (ch->flags & ATA_PERIODIC_POLL)
		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
	return error;
}

void
ata_interrupt(void *data)
{
	struct ata_channel *ch = (struct ata_channel *)data;

	mtx_lock(&ch->state_mtx);
	xpt_batch_start(ch->sim);
	ata_interrupt_locked(data);
	xpt_batch_done(ch->sim);
	mtx_unlock(&ch->state_mtx);
}

static void
ata_interrupt_locked(void *data)
{
	struct ata_channel *ch = (struct ata_channel *)data;
	struct ata_request *request;

	do {
		/* ignore interrupt if it's not for us */
		if (ch->hw.status && !ch->hw.status(ch->dev))
			break;

		/* do we have a running request */
		if (!(request = ch->running))
			break;

		ATA_DEBUG_RQ(request, "interrupt");

		/* safety check for the right state */
		if (ch->state == ATA_IDLE) {
			device_printf(request->dev,
			    "interrupt on idle channel ignored\n");
			break;
		}

		/*
		 * we have the HW locks, so end the transaction for this
		 * request if it finishes immediately, otherwise wait for
		 * the next interrupt
		 */
		if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
			ch->running = NULL;
			if (ch->state == ATA_ACTIVE)
				ch->state = ATA_IDLE;
			ata_cam_end_transaction(ch->dev, request);
			return;
		}
	} while (0);
}

static void
ata_periodic_poll(void *data)
{
	struct ata_channel *ch = (struct ata_channel *)data;

	callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
	ata_interrupt(ch);
}

void
ata_print_cable(device_t dev, u_int8_t *who)
{
	device_printf(dev,
	    "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
}

/*
 * misc support functions
 */
void
ata_default_registers(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);

	/* fill in the defaults from what's setup already */
	ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
	ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
	ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
	ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
	ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
	ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
	ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
	ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}

void
ata_udelay(int interval)
{
	/* for now just use DELAY, the timer/sleep subsystems are not there yet */
	if (1 || interval < (1000000/hz) || ata_delayed_attach)
		DELAY(interval);
	else
		pause("ataslp", interval/(1000000/hz));
}

const char *
ata_cmd2str(struct ata_request *request)
{
	static char buffer[20];

	if (request->flags & ATA_R_ATAPI) {
		switch (request->u.atapi.sense.key ?
		    request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) {
		case 0x00: return ("TEST_UNIT_READY");
		case 0x01: return ("REZERO");
		case 0x03: return ("REQUEST_SENSE");
		case 0x04: return ("FORMAT");
		case 0x08: return ("READ");
		case 0x0a: return ("WRITE");
		case 0x10: return ("WEOF");
		case 0x11: return ("SPACE");
		case 0x12: return ("INQUIRY");
		case 0x15: return ("MODE_SELECT");
		case 0x19: return ("ERASE");
		case 0x1a: return ("MODE_SENSE");
		case 0x1b: return ("START_STOP");
		case 0x1e: return ("PREVENT_ALLOW");
		case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
		case 0x25: return ("READ_CAPACITY");
		case 0x28: return ("READ_BIG");
		case 0x2a: return ("WRITE_BIG");
		case 0x2b: return ("LOCATE");
		case 0x34: return ("READ_POSITION");
		case 0x35: return ("SYNCHRONIZE_CACHE");
		case 0x3b: return ("WRITE_BUFFER");
		case 0x3c: return ("READ_BUFFER");
		case 0x42: return ("READ_SUBCHANNEL");
		case 0x43: return ("READ_TOC");
		case 0x45: return ("PLAY_10");
		case 0x47: return ("PLAY_MSF");
		case 0x48: return ("PLAY_TRACK");
		case 0x4b: return ("PAUSE");
		case 0x51: return ("READ_DISK_INFO");
		case 0x52: return ("READ_TRACK_INFO");
		case 0x53: return ("RESERVE_TRACK");
		case 0x54: return ("SEND_OPC_INFO");
		case 0x55: return ("MODE_SELECT_BIG");
		case 0x58: return ("REPAIR_TRACK");
		case 0x59: return ("READ_MASTER_CUE");
		case 0x5a: return ("MODE_SENSE_BIG");
		case 0x5b: return ("CLOSE_TRACK/SESSION");
		case 0x5c: return ("READ_BUFFER_CAPACITY");
		case 0x5d: return ("SEND_CUE_SHEET");
		case 0x96: return ("SERVICE_ACTION_IN");
		case 0xa1: return ("BLANK_CMD");
		case 0xa3: return ("SEND_KEY");
		case 0xa4: return ("REPORT_KEY");
		case 0xa5: return ("PLAY_12");
		case 0xa6: return ("LOAD_UNLOAD");
		case 0xad: return ("READ_DVD_STRUCTURE");
		case 0xb4: return ("PLAY_CD");
		case 0xbb: return ("SET_SPEED");
		case 0xbd: return ("MECH_STATUS");
		case 0xbe: return ("READ_CD");
		case 0xff: return ("POLL_DSC");
		}
	} else {
		switch (request->u.ata.command) {
		case 0x00: return ("NOP");
		case 0x08: return ("DEVICE_RESET");
		case 0x20: return ("READ");
		case 0x24: return ("READ48");
		case 0x25: return ("READ_DMA48");
		case 0x26: return ("READ_DMA_QUEUED48");
		case 0x27: return ("READ_NATIVE_MAX_ADDRESS48");
		case 0x29: return ("READ_MUL48");
		case 0x30: return ("WRITE");
		case 0x34: return ("WRITE48");
		case 0x35: return ("WRITE_DMA48");
		case 0x36: return ("WRITE_DMA_QUEUED48");
		case 0x37: return ("SET_MAX_ADDRESS48");
		case 0x39: return ("WRITE_MUL48");
		case 0x70: return ("SEEK");
		case 0xa0: return ("PACKET_CMD");
		case 0xa1: return ("ATAPI_IDENTIFY");
		case 0xa2: return ("SERVICE");
		case 0xb0: return ("SMART");
		case 0xc0: return ("CFA ERASE");
		case 0xc4: return ("READ_MUL");
		case 0xc5: return ("WRITE_MUL");
		case 0xc6: return ("SET_MULTI");
		case 0xc7: return ("READ_DMA_QUEUED");
		case 0xc8: return ("READ_DMA");
		case 0xca: return ("WRITE_DMA");
		case 0xcc: return ("WRITE_DMA_QUEUED");
		case 0xe6: return ("SLEEP");
		case 0xe7: return ("FLUSHCACHE");
		case 0xea: return ("FLUSHCACHE48");
		case 0xec: return ("ATA_IDENTIFY");
		case 0xef:
			switch (request->u.ata.feature) {
			case 0x03: return ("SETFEATURES SET TRANSFER MODE");
			case 0x02: return ("SETFEATURES ENABLE WCACHE");
			case 0x82: return ("SETFEATURES DISABLE WCACHE");
			case 0xaa: return ("SETFEATURES ENABLE RCACHE");
			case 0x55: return ("SETFEATURES DISABLE RCACHE");
			}
			sprintf(buffer, "SETFEATURES 0x%02x",
			    request->u.ata.feature);
			return (buffer);
		case 0xf5: return ("SECURITY_FREE_LOCK");
		case 0xf8: return ("READ_NATIVE_MAX_ADDRESS");
		case 0xf9: return ("SET_MAX_ADDRESS");
		}
	}
	sprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
	return (buffer);
}

const char *
ata_mode2str(int mode)
{
	switch (mode) {
	case -1: return "UNSUPPORTED";
	case ATA_PIO0: return "PIO0";
	case ATA_PIO1: return "PIO1";
	case ATA_PIO2: return "PIO2";
	case ATA_PIO3: return "PIO3";
	case ATA_PIO4: return "PIO4";
	case ATA_WDMA0: return "WDMA0";
	case ATA_WDMA1: return "WDMA1";
	case ATA_WDMA2: return "WDMA2";
	case ATA_UDMA0: return "UDMA16";
	case ATA_UDMA1: return "UDMA25";
	case ATA_UDMA2: return "UDMA33";
	case ATA_UDMA3: return "UDMA40";
	case ATA_UDMA4: return "UDMA66";
	case ATA_UDMA5: return "UDMA100";
	case ATA_UDMA6: return "UDMA133";
	case ATA_SA150: return "SATA150";
	case ATA_SA300: return "SATA300";
	default:
		if (mode & ATA_DMA_MASK)
			return "BIOSDMA";
		else
			return "BIOSPIO";
	}
}

static int
ata_str2mode(const char *str)
{

	if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
	if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
	if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
	if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
	if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
	if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
	if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
	if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
	if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
	if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
	if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
	if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
	if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
	if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
	if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
	if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
	if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
	if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
	if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
	if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
	if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
	if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
	return (-1);
}

int
ata_atapi(device_t dev, int target)
{
	struct ata_channel *ch = device_get_softc(dev);

	return (ch->devices & (ATA_ATAPI_MASTER << target));
}

void
ata_timeout(struct ata_request *request)
{
	struct ata_channel *ch;

	ch = device_get_softc(request->parent);
	//request->flags |= ATA_R_DEBUG;
	ATA_DEBUG_RQ(request, "timeout");

	/*
	 * If we have an ATA_ACTIVE request running, we flag the request
	 * ATA_R_TIMEOUT so ata_cam_end_transaction() will handle it correctly.
	 * Also, NULL out the running request so we won't lose the race with
	 * an eventual interrupt arriving late.
	 */
	if (ch->state == ATA_ACTIVE) {
		request->flags |= ATA_R_TIMEOUT;
		if (ch->dma.unload)
			ch->dma.unload(request);
		ch->running = NULL;
		ch->state = ATA_IDLE;
		ata_cam_end_transaction(ch->dev, request);
	}
	mtx_unlock(&ch->state_mtx);
}

static void
ata_cam_begin_transaction(device_t dev, union ccb *ccb)
{
	struct ata_channel *ch = device_get_softc(dev);
	struct ata_request *request;

	if (!(request = ata_alloc_request())) {
		device_printf(dev, "FAILURE - out of memory in start\n");
		ccb->ccb_h.status = CAM_REQ_INVALID;
		xpt_done(ccb);
		return;
	}
	bzero(request, sizeof(*request));

	/* setup request */
	request->dev = NULL;
	request->parent = dev;
	request->unit = ccb->ccb_h.target_id;
	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		request->data = ccb->ataio.data_ptr;
		request->bytecount = ccb->ataio.dxfer_len;
		request->u.ata.command = ccb->ataio.cmd.command;
		request->u.ata.feature =
		    ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
		     (uint16_t)ccb->ataio.cmd.features;
		request->u.ata.count =
		    ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
		     (uint16_t)ccb->ataio.cmd.sector_count;
		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
			request->flags |= ATA_R_48BIT;
			request->u.ata.lba =
			    ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
			    ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
			    ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
		} else {
			request->u.ata.lba =
			    ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
		}
		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
		    ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
		     (uint64_t)ccb->ataio.cmd.lba_low;
		if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)
			request->flags |= ATA_R_NEEDRESULT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
			request->flags |= ATA_R_DMA;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			request->flags |= ATA_R_READ;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			request->flags |= ATA_R_WRITE;
		if (ccb->ataio.cmd.command == ATA_READ_MUL ||
		    ccb->ataio.cmd.command == ATA_READ_MUL48 ||
		    ccb->ataio.cmd.command == ATA_WRITE_MUL ||
		    ccb->ataio.cmd.command == ATA_WRITE_MUL48) {
			request->transfersize = min(request->bytecount,
			    ch->curr[ccb->ccb_h.target_id].bytecount);
		} else
			request->transfersize = min(request->bytecount, 512);
	} else {
		request->data = ccb->csio.data_ptr;
		request->bytecount = ccb->csio.dxfer_len;
		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
		    request->u.atapi.ccb, ccb->csio.cdb_len);
		request->flags |= ATA_R_ATAPI;
		if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
			request->flags |= ATA_R_ATAPI16;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
			request->flags |= ATA_R_DMA;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			request->flags |= ATA_R_READ;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			request->flags |= ATA_R_WRITE;
		request->transfersize = min(request->bytecount,
		    ch->curr[ccb->ccb_h.target_id].bytecount);
	}
	request->retries = 0;
	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
	callout_init_mtx(&request->callout, &ch->state_mtx,
	    CALLOUT_RETURNUNLOCKED);
	request->ccb = ccb;
	request->flags |= ATA_R_DATA_IN_CCB;

	ch->running = request;
	ch->state = ATA_ACTIVE;
	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
		ch->running = NULL;
		ch->state = ATA_IDLE;
		ata_cam_end_transaction(dev, request);
		return;
	}
}

static void
ata_cam_request_sense(device_t dev, struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb = request->ccb;

	ch->requestsense = 1;

	bzero(request, sizeof(*request));
	request->dev = NULL;
	request->parent = dev;
	request->unit = ccb->ccb_h.target_id;
	request->data = (void *)&ccb->csio.sense_data;
	request->bytecount = ccb->csio.sense_len;
	request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE;
	request->u.atapi.ccb[4] = ccb->csio.sense_len;
	request->flags |= ATA_R_ATAPI;
	if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
		request->flags |= ATA_R_ATAPI16;
	if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
		request->flags |= ATA_R_DMA;
	request->flags |= ATA_R_READ;
	request->transfersize = min(request->bytecount,
	    ch->curr[ccb->ccb_h.target_id].bytecount);
	request->retries = 0;
	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
	callout_init_mtx(&request->callout, &ch->state_mtx,
	    CALLOUT_RETURNUNLOCKED);
	request->ccb = ccb;

	ch->running = request;
	ch->state = ATA_ACTIVE;
	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
		ch->running = NULL;
		ch->state = ATA_IDLE;
		ata_cam_end_transaction(dev, request);
		return;
	}
}

static void
ata_cam_process_sense(device_t dev, struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb = request->ccb;
	int fatalerr = 0;

	ch->requestsense = 0;

	if (request->flags & ATA_R_TIMEOUT)
		fatalerr = 1;
	if ((request->flags & ATA_R_TIMEOUT) == 0 &&
	    (request->status & ATA_S_ERROR) == 0 &&
	    request->result == 0) {
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	} else {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
	}

	ata_free_request(request);
	xpt_done(ccb);
	/* Do error recovery if needed. */
	if (fatalerr)
		ata_reinit(dev);
}

static void
ata_cam_end_transaction(device_t dev, struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb = request->ccb;
	int fatalerr = 0;

	if (ch->requestsense) {
		ata_cam_process_sense(dev, request);
		return;
	}

	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	if (request->flags & ATA_R_TIMEOUT) {
		xpt_freeze_simq(ch->sim, 1);
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
		fatalerr = 1;
	} else if (request->status & ATA_S_ERROR) {
		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
		} else {
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		}
	} else if (request->result == ERESTART)
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	else if (request->result != 0)
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
	else
		ccb->ccb_h.status |= CAM_REQ_CMP;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
	    ((request->status & ATA_S_ERROR) ||
	     (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
		struct ata_res *res = &ccb->ataio.res;

		res->status = request->status;
		res->error = request->error;
		res->lba_low = request->u.ata.lba;
		res->lba_mid = request->u.ata.lba >> 8;
		res->lba_high = request->u.ata.lba >> 16;
		res->device = request->u.ata.lba >> 24;
		res->lba_low_exp = request->u.ata.lba >> 24;
		res->lba_mid_exp = request->u.ata.lba >> 32;
		res->lba_high_exp = request->u.ata.lba >> 40;
		res->sector_count = request->u.ata.count;
		res->sector_count_exp = request->u.ata.count >> 8;
	}
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			ccb->ataio.resid =
			    ccb->ataio.dxfer_len - request->donecount;
		} else {
			ccb->csio.resid =
			    ccb->csio.dxfer_len - request->donecount;
		}
	}
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
	    (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
		ata_cam_request_sense(dev, request);
	else {
		ata_free_request(request);
		xpt_done(ccb);
	}
	/* Do error recovery if needed. */
	if (fatalerr)
		ata_reinit(dev);
}

static int
ata_check_ids(device_t dev, union ccb *ccb)
{
	struct ata_channel *ch = device_get_softc(dev);

	if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
		ccb->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return (-1);
	}
	if (ccb->ccb_h.target_lun != 0) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return (-1);
	}
	return (0);
}

static void
ataaction(struct cam_sim *sim, union ccb *ccb)
{
	device_t dev, parent;
	struct ata_channel *ch;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
	    ccb->ccb_h.func_code));

	ch = (struct ata_channel *)cam_sim_softc(sim);
	dev = ch->dev;
	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ATA_IO:	/* Execute the requested I/O operation */
	case XPT_SCSI_IO:
		if (ata_check_ids(dev, ccb))
			return;
		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
		    << ccb->ccb_h.target_id)) == 0) {
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}
		if (ch->running)
			device_printf(dev, "already running!\n");
		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
			struct ata_res *res = &ccb->ataio.res;

			bzero(res, sizeof(*res));
			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
				res->lba_high = 0;
				res->lba_mid = 0;
			} else {
				res->lba_high = 0xeb;
				res->lba_mid = 0x14;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		ata_cam_begin_transaction(dev, ccb);
		return;
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_TARGET_IO:		/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection */
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ata_cam_device *d;

		if (ata_check_ids(dev, ccb))
			return;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		if (ch->flags & ATA_SATA) {
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
				d->revision = cts->xport_specific.sata.revision;
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
					d->mode = ATA_SETMODE(ch->dev,
					    ccb->ccb_h.target_id,
					    cts->xport_specific.sata.mode);
				} else
					d->mode = cts->xport_specific.sata.mode;
			}
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
				d->atapi = cts->xport_specific.sata.atapi;
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
				d->caps = cts->xport_specific.sata.caps;
		} else {
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
					d->mode = ATA_SETMODE(ch->dev,
					    ccb->ccb_h.target_id,
					    cts->xport_specific.ata.mode);
				} else
					d->mode = cts->xport_specific.ata.mode;
			}
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
				d->bytecount = cts->xport_specific.ata.bytecount;
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
				d->atapi = cts->xport_specific.ata.atapi;
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_CAPS)
				d->caps = cts->xport_specific.ata.caps;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ata_cam_device *d;

		if (ata_check_ids(dev, ccb))
			return;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		cts->protocol = PROTO_UNSPECIFIED;
		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
		if (ch->flags & ATA_SATA) {
			cts->transport = XPORT_SATA;
			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
			cts->xport_specific.sata.valid = 0;
			cts->xport_specific.sata.mode = d->mode;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
			cts->xport_specific.sata.bytecount = d->bytecount;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
				cts->xport_specific.sata.revision =
				    ATA_GETREV(dev, ccb->ccb_h.target_id);
				if (cts->xport_specific.sata.revision != 0xff) {
					cts->xport_specific.sata.valid |=
					    CTS_SATA_VALID_REVISION;
				}
				cts->xport_specific.sata.caps =
				    d->caps & CTS_SATA_CAPS_D;
				if (ch->pm_level) {
					cts->xport_specific.sata.caps |=
					    CTS_SATA_CAPS_H_PMREQ;
				}
				cts->xport_specific.sata.caps &=
				    ch->user[ccb->ccb_h.target_id].caps;
			} else {
				cts->xport_specific.sata.revision = d->revision;
				cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
				cts->xport_specific.sata.caps = d->caps;
			}
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
			cts->xport_specific.sata.atapi = d->atapi;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
		} else {
			cts->transport = XPORT_ATA;
			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
			cts->xport_specific.ata.valid = 0;
			cts->xport_specific.ata.mode = d->mode;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
			cts->xport_specific.ata.bytecount = d->bytecount;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
				cts->xport_specific.ata.caps =
				    d->caps & CTS_ATA_CAPS_D;
				if (!(ch->flags & ATA_NO_48BIT_DMA))
					cts->xport_specific.ata.caps |=
					    CTS_ATA_CAPS_H_DMA48;
				cts->xport_specific.ata.caps &=
				    ch->user[ccb->ccb_h.target_id].caps;
			} else
				cts->xport_specific.ata.caps = d->caps;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS;
			cts->xport_specific.ata.atapi = d->atapi;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:	/* Reset the specified SCSI bus */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		ata_reinit(dev);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_TERM_IO:	/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_PATH_INQ:	/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		parent = device_get_parent(dev);
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_SEQSCAN;
		cpi->hba_eng_cnt = 0;
		if (ch->flags & ATA_NO_SLAVE)
			cpi->max_target = 0;
		else
			cpi->max_target = 1;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		if (ch->flags & ATA_SATA)
			cpi->base_transfer_speed = 150000;
		else
			cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		if (ch->flags & ATA_SATA)
			cpi->transport = XPORT_SATA;
		else
			cpi->transport = XPORT_ATA;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->protocol = PROTO_ATA;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
		if (device_get_devclass(device_get_parent(parent)) ==
		    devclass_find("pci")) {
			cpi->hba_vendor = pci_get_vendor(parent);
			cpi->hba_device = pci_get_device(parent);
			cpi->hba_subvendor = pci_get_subvendor(parent);
			cpi->hba_subdevice = pci_get_subdevice(parent);
		}
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

static void
atapoll(struct cam_sim *sim)
{
	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);

	ata_interrupt_locked(ch);
}

/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return 0;

	case MOD_UNLOAD:
		return 0;

	default:
		return EOPNOTSUPP;
	}
}

static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(ata, 1);
MODULE_DEPEND(ata, cam, 1, 1, 1);

static void
ata_init(void)
{
	ata_request_zone = uma_zcreate("ata_request",
	    sizeof(struct ata_request), NULL, NULL, NULL, NULL, 0, 0);
}
SYSINIT(ata_register, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_init, NULL);

static void
ata_uninit(void)
{
	uma_zdestroy(ata_request_zone);
}
SYSUNINIT(ata_unregister, SI_SUB_DRIVERS, SI_ORDER_SECOND, ata_uninit, NULL);