/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ata.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/ctype.h>
#include <sys/conf.h>
#include <sys/bus.h>
#include <sys/bio.h>
#include <sys/malloc.h>
#include <sys/sysctl.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/ata/ata-all.h>
#include <dev/pci/pcivar.h>
#include <ata_if.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

/* prototypes */
static void ataaction(struct cam_sim *sim, union ccb *ccb);
static void atapoll(struct cam_sim *sim);
static void ata_cam_begin_transaction(device_t dev, union ccb *ccb);
static void ata_cam_end_transaction(device_t dev, struct ata_request *request);
static void ata_cam_request_sense(device_t dev, struct ata_request *request);
static int ata_check_ids(device_t dev, union ccb *ccb);
static void ata_conn_event(void *context, int dummy);
static void ata_interrupt_locked(void *data);
static int ata_module_event_handler(module_t mod, int what, void *arg);
static void ata_periodic_poll(void *data);
static int ata_str2mode(const char *str);

/* global vars */
MALLOC_DEFINE(M_ATA, "ata_generic", "ATA driver generic layer");
int (*ata_raid_ioctl_func)(u_long cmd, caddr_t data) = NULL;
devclass_t ata_devclass;
int ata_dma_check_80pin = 1;

/* sysctl vars */
static SYSCTL_NODE(_hw, OID_AUTO, ata, CTLFLAG_RD, 0, "ATA driver parameters");
SYSCTL_INT(_hw_ata, OID_AUTO, ata_dma_check_80pin,
    CTLFLAG_RWTUN, &ata_dma_check_80pin, 0,
    "Check for 80pin cable before setting ATA DMA mode");
FEATURE(ata_cam, "ATA devices are accessed through the cam(4) driver");
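
/*
 * Note (illustrative usage): the knob above is declared CTLFLAG_RWTUN, so
 * the 80pin cable check can be disabled either as a loader tunable
 * ("hw.ata.ata_dma_check_80pin=0" in loader.conf) or at runtime through
 * sysctl(8); the default of 1 keeps the check enabled.
 */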

/*
 * newbus device interface related functions
 */
int
ata_probe(device_t dev)
{
	return (BUS_PROBE_LOW_PRIORITY);
}

int
ata_attach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);
	int error, rid;
	struct cam_devq *devq;
	const char *res;
	char buf[64];
	int i, mode;

	/* check that we have a virgin channel to attach */
	if (ch->r_irq)
		return EEXIST;

	/* initialize the softc basics */
	ch->dev = dev;
	ch->state = ATA_IDLE;
	bzero(&ch->state_mtx, sizeof(struct mtx));
	mtx_init(&ch->state_mtx, "ATA state lock", NULL, MTX_DEF);
	TASK_INIT(&ch->conntask, 0, ata_conn_event, dev);
	for (i = 0; i < 16; i++) {
		ch->user[i].revision = 0;
		snprintf(buf, sizeof(buf), "dev%d.sata_rev", i);
		if (resource_int_value(device_get_name(dev),
		    device_get_unit(dev), buf, &mode) != 0 &&
		    resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "sata_rev", &mode) != 0)
			mode = -1;
		if (mode >= 0)
			ch->user[i].revision = mode;
		ch->user[i].mode = 0;
		snprintf(buf, sizeof(buf), "dev%d.mode", i);
		if (resource_string_value(device_get_name(dev),
		    device_get_unit(dev), buf, &res) == 0)
			mode = ata_str2mode(res);
		else if (resource_string_value(device_get_name(dev),
		    device_get_unit(dev), "mode", &res) == 0)
			mode = ata_str2mode(res);
		else
			mode = -1;
		if (mode >= 0)
			ch->user[i].mode = mode;
		if (ch->flags & ATA_SATA)
			ch->user[i].bytecount = 8192;
		else
			ch->user[i].bytecount = MAXPHYS;
		ch->user[i].caps = 0;
		ch->curr[i] = ch->user[i];
		if (ch->flags & ATA_SATA) {
			if (ch->pm_level > 0)
				ch->user[i].caps |= CTS_SATA_CAPS_H_PMREQ;
			if (ch->pm_level > 1)
				ch->user[i].caps |= CTS_SATA_CAPS_D_PMREQ;
		} else {
			if (!(ch->flags & ATA_NO_48BIT_DMA))
				ch->user[i].caps |= CTS_ATA_CAPS_H_DMA48;
		}
	}
	callout_init(&ch->poll_callout, 1);

	/* allocate DMA resources if DMA HW present */
	if (ch->dma.alloc)
		ch->dma.alloc(dev);

	/* setup interrupt delivery */
	rid = ATA_IRQ_RID;
	ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (!ch->r_irq) {
		device_printf(dev, "unable to allocate interrupt\n");
		return ENXIO;
	}
	if ((error = bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
	    ata_interrupt, ch, &ch->ih))) {
		bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
		device_printf(dev, "unable to setup interrupt\n");
		return error;
	}

	if (ch->flags & ATA_PERIODIC_POLL)
		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
	mtx_lock(&ch->state_mtx);
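	/*
	 * CAM glue: the channel registers a single SIM with a one-entry
	 * device queue (cam_simq_alloc(1)), so only one transaction is
	 * outstanding per channel at a time.  The err1..err3 labels below
	 * unwind these allocations in reverse order if any step fails.
	 */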
	/* Create the device queue for our SIM. */
	devq = cam_simq_alloc(1);
	if (devq == NULL) {
		device_printf(dev, "Unable to allocate simq\n");
		error = ENOMEM;
		goto err1;
	}
	/* Construct SIM entry */
	ch->sim = cam_sim_alloc(ataaction, atapoll, "ata", ch,
	    device_get_unit(dev), &ch->state_mtx, 1, 0, devq);
	if (ch->sim == NULL) {
		device_printf(dev, "unable to allocate sim\n");
		cam_simq_free(devq);
		error = ENOMEM;
		goto err1;
	}
	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
		device_printf(dev, "unable to register xpt bus\n");
		error = ENXIO;
		goto err2;
	}
	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "unable to create path\n");
		error = ENXIO;
		goto err3;
	}
	mtx_unlock(&ch->state_mtx);
	return (0);

err3:
	xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
	ch->sim = NULL;
err1:
	bus_release_resource(dev, SYS_RES_IRQ, rid, ch->r_irq);
	mtx_unlock(&ch->state_mtx);
	if (ch->flags & ATA_PERIODIC_POLL)
		callout_drain(&ch->poll_callout);
	return (error);
}

int
ata_detach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);

	/* check that we have a valid channel to detach */
	if (!ch->r_irq)
		return ENXIO;

	/* grab the channel lock so no new requests get launched */
	mtx_lock(&ch->state_mtx);
	ch->state |= ATA_STALL_QUEUE;
	mtx_unlock(&ch->state_mtx);
	if (ch->flags & ATA_PERIODIC_POLL)
		callout_drain(&ch->poll_callout);

	taskqueue_drain(taskqueue_thread, &ch->conntask);

	mtx_lock(&ch->state_mtx);
	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
	xpt_free_path(ch->path);
	xpt_bus_deregister(cam_sim_path(ch->sim));
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
	ch->sim = NULL;
	mtx_unlock(&ch->state_mtx);

	/* release resources */
	bus_teardown_intr(dev, ch->r_irq, ch->ih);
	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
	ch->r_irq = NULL;

	/* free DMA resources if DMA HW present */
	if (ch->dma.free)
		ch->dma.free(dev);

	mtx_destroy(&ch->state_mtx);
	return 0;
}

static void
ata_conn_event(void *context, int dummy)
{
	device_t dev = (device_t)context;
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb;

	mtx_lock(&ch->state_mtx);
	if (ch->sim == NULL) {
		mtx_unlock(&ch->state_mtx);
		return;
	}
	ata_reinit(dev);
	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
		return;
	if (xpt_create_path(&ccb->ccb_h.path, NULL,
	    cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}
	xpt_rescan(ccb);
	mtx_unlock(&ch->state_mtx);
}

int
ata_reinit(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);
	struct ata_request *request;

	xpt_freeze_simq(ch->sim, 1);
	if ((request = ch->running)) {
		ch->running = NULL;
		if (ch->state == ATA_ACTIVE)
			ch->state = ATA_IDLE;
		callout_stop(&request->callout);
		if (ch->dma.unload)
			ch->dma.unload(request);
		request->result = ERESTART;
		ata_cam_end_transaction(dev, request);
	}
	/* reset the controller HW, the channel and device(s) */
	ATA_RESET(dev);
	/* Tell the XPT about the event */
	xpt_async(AC_BUS_RESET, ch->path, NULL);
	xpt_release_simq(ch->sim, TRUE);
	return(0);
}
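
/*
 * Power management entry points: ata_suspend() freezes the SIM queue and
 * waits for the channel to drain to ATA_IDLE; ata_resume() re-runs
 * ata_reinit() to bring the hardware back up and then releases the queue.
 */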

int
ata_suspend(device_t dev)
{
	struct ata_channel *ch;

	/* check for valid device */
	if (!dev || !(ch = device_get_softc(dev)))
		return ENXIO;

	if (ch->flags & ATA_PERIODIC_POLL)
		callout_drain(&ch->poll_callout);
	mtx_lock(&ch->state_mtx);
	xpt_freeze_simq(ch->sim, 1);
	while (ch->state != ATA_IDLE)
		msleep(ch, &ch->state_mtx, PRIBIO, "atasusp", hz/100);
	mtx_unlock(&ch->state_mtx);
	return(0);
}

int
ata_resume(device_t dev)
{
	struct ata_channel *ch;
	int error;

	/* check for valid device */
	if (!dev || !(ch = device_get_softc(dev)))
		return ENXIO;

	mtx_lock(&ch->state_mtx);
	error = ata_reinit(dev);
	xpt_release_simq(ch->sim, TRUE);
	mtx_unlock(&ch->state_mtx);
	if (ch->flags & ATA_PERIODIC_POLL)
		callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
	return error;
}

void
ata_interrupt(void *data)
{
	struct ata_channel *ch = (struct ata_channel *)data;

	mtx_lock(&ch->state_mtx);
	ata_interrupt_locked(data);
	mtx_unlock(&ch->state_mtx);
}

static void
ata_interrupt_locked(void *data)
{
	struct ata_channel *ch = (struct ata_channel *)data;
	struct ata_request *request;

	/* ignore interrupt if it's not for us */
	if (ch->hw.status && !ch->hw.status(ch->dev))
		return;

	/* do we have a running request */
	if (!(request = ch->running))
		return;

	ATA_DEBUG_RQ(request, "interrupt");

	/* safety check for the right state */
	if (ch->state == ATA_IDLE) {
		device_printf(request->dev, "interrupt on idle channel ignored\n");
		return;
	}

	/*
	 * We have the HW locks, so end the transaction for this request
	 * if it finishes immediately; otherwise wait for the next interrupt.
	 */
	if (ch->hw.end_transaction(request) == ATA_OP_FINISHED) {
		ch->running = NULL;
		if (ch->state == ATA_ACTIVE)
			ch->state = ATA_IDLE;
		ata_cam_end_transaction(ch->dev, request);
		return;
	}
}

static void
ata_periodic_poll(void *data)
{
	struct ata_channel *ch = (struct ata_channel *)data;

	callout_reset(&ch->poll_callout, hz, ata_periodic_poll, ch);
	ata_interrupt(ch);
}

void
ata_print_cable(device_t dev, u_int8_t *who)
{
	device_printf(dev,
	    "DMA limited to UDMA33, %s found non-ATA66 cable\n", who);
}

/*
 * misc support functions
 */
void
ata_default_registers(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);

	/* fill in the defaults from what's set up already */
	ch->r_io[ATA_ERROR].res = ch->r_io[ATA_FEATURE].res;
	ch->r_io[ATA_ERROR].offset = ch->r_io[ATA_FEATURE].offset;
	ch->r_io[ATA_IREASON].res = ch->r_io[ATA_COUNT].res;
	ch->r_io[ATA_IREASON].offset = ch->r_io[ATA_COUNT].offset;
	ch->r_io[ATA_STATUS].res = ch->r_io[ATA_COMMAND].res;
	ch->r_io[ATA_STATUS].offset = ch->r_io[ATA_COMMAND].offset;
	ch->r_io[ATA_ALTSTAT].res = ch->r_io[ATA_CONTROL].res;
	ch->r_io[ATA_ALTSTAT].offset = ch->r_io[ATA_CONTROL].offset;
}

void
ata_udelay(int interval)
{
	/* for now just use DELAY, the timer/sleep subsystems are not there yet */
	if (1 || interval < (1000000/hz) || ata_delayed_attach)
		DELAY(interval);
	else
		pause("ataslp", interval/(1000000/hz));
}
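
/*
 * ata_cmd2str() maps an ATA command byte (or ATAPI CDB opcode) to a short
 * human-readable name for debug output.  Unknown opcodes are formatted
 * into a static buffer, so the returned string is not reentrant.
 */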

const char *
ata_cmd2str(struct ata_request *request)
{
	static char buffer[20];

	if (request->flags & ATA_R_ATAPI) {
		switch (request->u.atapi.sense.key ?
		    request->u.atapi.saved_cmd : request->u.atapi.ccb[0]) {
		case 0x00: return ("TEST_UNIT_READY");
		case 0x01: return ("REZERO");
		case 0x03: return ("REQUEST_SENSE");
		case 0x04: return ("FORMAT");
		case 0x08: return ("READ");
		case 0x0a: return ("WRITE");
		case 0x10: return ("WEOF");
		case 0x11: return ("SPACE");
		case 0x12: return ("INQUIRY");
		case 0x15: return ("MODE_SELECT");
		case 0x19: return ("ERASE");
		case 0x1a: return ("MODE_SENSE");
		case 0x1b: return ("START_STOP");
		case 0x1e: return ("PREVENT_ALLOW");
		case 0x23: return ("ATAPI_READ_FORMAT_CAPACITIES");
		case 0x25: return ("READ_CAPACITY");
		case 0x28: return ("READ_BIG");
		case 0x2a: return ("WRITE_BIG");
		case 0x2b: return ("LOCATE");
		case 0x34: return ("READ_POSITION");
		case 0x35: return ("SYNCHRONIZE_CACHE");
		case 0x3b: return ("WRITE_BUFFER");
		case 0x3c: return ("READ_BUFFER");
		case 0x42: return ("READ_SUBCHANNEL");
		case 0x43: return ("READ_TOC");
		case 0x45: return ("PLAY_10");
		case 0x47: return ("PLAY_MSF");
		case 0x48: return ("PLAY_TRACK");
		case 0x4b: return ("PAUSE");
		case 0x51: return ("READ_DISK_INFO");
		case 0x52: return ("READ_TRACK_INFO");
		case 0x53: return ("RESERVE_TRACK");
		case 0x54: return ("SEND_OPC_INFO");
		case 0x55: return ("MODE_SELECT_BIG");
		case 0x58: return ("REPAIR_TRACK");
		case 0x59: return ("READ_MASTER_CUE");
		case 0x5a: return ("MODE_SENSE_BIG");
		case 0x5b: return ("CLOSE_TRACK/SESSION");
		case 0x5c: return ("READ_BUFFER_CAPACITY");
		case 0x5d: return ("SEND_CUE_SHEET");
		case 0x96: return ("SERVICE_ACTION_IN");
		case 0xa1: return ("BLANK_CMD");
		case 0xa3: return ("SEND_KEY");
		case 0xa4: return ("REPORT_KEY");
		case 0xa5: return ("PLAY_12");
		case 0xa6: return ("LOAD_UNLOAD");
		case 0xad: return ("READ_DVD_STRUCTURE");
		case 0xb4: return ("PLAY_CD");
		case 0xbb: return ("SET_SPEED");
		case 0xbd: return ("MECH_STATUS");
		case 0xbe: return ("READ_CD");
		case 0xff: return ("POLL_DSC");
		}
	} else {
		switch (request->u.ata.command) {
		case 0x00:
			switch (request->u.ata.feature) {
			case 0x00: return ("NOP FLUSHQUEUE");
			case 0x01: return ("NOP AUTOPOLL");
			}
			return ("NOP");
		case 0x03: return ("CFA_REQUEST_EXTENDED_ERROR");
		case 0x06:
			switch (request->u.ata.feature) {
			case 0x01: return ("DSM TRIM");
			}
			return "DSM";
		case 0x08: return ("DEVICE_RESET");
		case 0x20: return ("READ");
		case 0x24: return ("READ48");
		case 0x25: return ("READ_DMA48");
		case 0x26: return ("READ_DMA_QUEUED48");
		case 0x27: return ("READ_NATIVE_MAX_ADDRESS48");
		case 0x29: return ("READ_MUL48");
		case 0x2a: return ("READ_STREAM_DMA48");
		case 0x2b: return ("READ_STREAM48");
		case 0x2f: return ("READ_LOG_EXT");
		case 0x30: return ("WRITE");
		case 0x34: return ("WRITE48");
		case 0x35: return ("WRITE_DMA48");
		case 0x36: return ("WRITE_DMA_QUEUED48");
		case 0x37: return ("SET_MAX_ADDRESS48");
		case 0x39: return ("WRITE_MUL48");
		case 0x3a: return ("WRITE_STREAM_DMA48");
		case 0x3b: return ("WRITE_STREAM48");
		case 0x3d: return ("WRITE_DMA_FUA48");
		case 0x3e: return ("WRITE_DMA_QUEUED_FUA48");
		case 0x3f: return ("WRITE_LOG_EXT");
		case 0x40: return ("READ_VERIFY");
		case 0x42: return ("READ_VERIFY48");
		case 0x45:
			switch (request->u.ata.feature) {
			case 0x55: return ("WRITE_UNCORRECTABLE48 PSEUDO");
			case 0xaa: return ("WRITE_UNCORRECTABLE48 FLAGGED");
			}
			return "WRITE_UNCORRECTABLE48";
		case 0x51: return ("CONFIGURE_STREAM");
		case 0x60: return ("READ_FPDMA_QUEUED");
		case 0x61: return ("WRITE_FPDMA_QUEUED");
		case 0x63: return ("NCQ_NON_DATA");
		case 0x64: return ("SEND_FPDMA_QUEUED");
		case 0x65: return ("RECEIVE_FPDMA_QUEUED");
		case 0x67:
			if (request->u.ata.feature == 0xec)
				return ("SEP_ATTN IDENTIFY");
			switch (request->u.ata.lba) {
			case 0x00: return ("SEP_ATTN READ BUFFER");
			case 0x02: return ("SEP_ATTN RECEIVE DIAGNOSTIC RESULTS");
			case 0x80: return ("SEP_ATTN WRITE BUFFER");
			case 0x82: return ("SEP_ATTN SEND DIAGNOSTIC");
			}
			return ("SEP_ATTN");
		case 0x70: return ("SEEK");
		case 0x87: return ("CFA_TRANSLATE_SECTOR");
		case 0x90: return ("EXECUTE_DEVICE_DIAGNOSTIC");
		case 0x92: return ("DOWNLOAD_MICROCODE");
		case 0xa0: return ("PACKET");
		case 0xa1: return ("ATAPI_IDENTIFY");
		case 0xa2: return ("SERVICE");
		case 0xb0:
			switch (request->u.ata.feature) {
			case 0xd0: return ("SMART READ ATTR VALUES");
			case 0xd1: return ("SMART READ ATTR THRESHOLDS");
			case 0xd3: return ("SMART SAVE ATTR VALUES");
			case 0xd4: return ("SMART EXECUTE OFFLINE IMMEDIATE");
			case 0xd5: return ("SMART READ LOG DATA");
			case 0xd8: return ("SMART ENABLE OPERATION");
			case 0xd9: return ("SMART DISABLE OPERATION");
			case 0xda: return ("SMART RETURN STATUS");
			}
			return ("SMART");
		case 0xb1: return ("DEVICE CONFIGURATION");
		case 0xc0: return ("CFA_ERASE");
		case 0xc4: return ("READ_MUL");
		case 0xc5: return ("WRITE_MUL");
		case 0xc6: return ("SET_MULTI");
		case 0xc7: return ("READ_DMA_QUEUED");
		case 0xc8: return ("READ_DMA");
		case 0xca: return ("WRITE_DMA");
		case 0xcc: return ("WRITE_DMA_QUEUED");
		case 0xcd: return ("CFA_WRITE_MULTIPLE_WITHOUT_ERASE");
		case 0xce: return ("WRITE_MUL_FUA48");
		case 0xd1: return ("CHECK_MEDIA_CARD_TYPE");
		case 0xda: return ("GET_MEDIA_STATUS");
		case 0xde: return ("MEDIA_LOCK");
		case 0xdf: return ("MEDIA_UNLOCK");
		case 0xe0: return ("STANDBY_IMMEDIATE");
		case 0xe1: return ("IDLE_IMMEDIATE");
		case 0xe2: return ("STANDBY");
		case 0xe3: return ("IDLE");
		case 0xe4: return ("READ_BUFFER/PM");
		case 0xe5: return ("CHECK_POWER_MODE");
		case 0xe6: return ("SLEEP");
		case 0xe7: return ("FLUSHCACHE");
		case 0xe8: return ("WRITE_PM");
		case 0xea: return ("FLUSHCACHE48");
		case 0xec: return ("ATA_IDENTIFY");
		case 0xed: return ("MEDIA_EJECT");
		case 0xef:
			switch (request->u.ata.feature) {
			case 0x03: return ("SETFEATURES SET TRANSFER MODE");
			case 0x02: return ("SETFEATURES ENABLE WCACHE");
			case 0x82: return ("SETFEATURES DISABLE WCACHE");
			case 0x06: return ("SETFEATURES ENABLE PUIS");
			case 0x86: return ("SETFEATURES DISABLE PUIS");
			case 0x07: return ("SETFEATURES SPIN-UP");
			case 0x10: return ("SETFEATURES ENABLE SATA FEATURE");
			case 0x90: return ("SETFEATURES DISABLE SATA FEATURE");
			case 0xaa: return ("SETFEATURES ENABLE RCACHE");
			case 0x55: return ("SETFEATURES DISABLE RCACHE");
			case 0x5d: return ("SETFEATURES ENABLE RELIRQ");
			case 0xdd: return ("SETFEATURES DISABLE RELIRQ");
			case 0x5e: return ("SETFEATURES ENABLE SRVIRQ");
			case 0xde: return ("SETFEATURES DISABLE SRVIRQ");
			}
			return "SETFEATURES";
		case 0xf1: return ("SECURITY_SET_PASSWORD");
		case 0xf2: return ("SECURITY_UNLOCK");
		case 0xf3: return ("SECURITY_ERASE_PREPARE");
		case 0xf4: return ("SECURITY_ERASE_UNIT");
		case 0xf5: return ("SECURITY_FREEZE_LOCK");
		case 0xf6: return ("SECURITY_DISABLE_PASSWORD");
		case 0xf8: return ("READ_NATIVE_MAX_ADDRESS");
		case 0xf9: return ("SET_MAX_ADDRESS");
		}
	}
	sprintf(buffer, "unknown CMD (0x%02x)", request->u.ata.command);
	return (buffer);
}

const char *
ata_mode2str(int mode)
{
	switch (mode) {
	case -1: return "UNSUPPORTED";
	case ATA_PIO0: return "PIO0";
	case ATA_PIO1: return "PIO1";
	case ATA_PIO2: return "PIO2";
	case ATA_PIO3: return "PIO3";
	case ATA_PIO4: return "PIO4";
	case ATA_WDMA0: return "WDMA0";
	case ATA_WDMA1: return "WDMA1";
	case ATA_WDMA2: return "WDMA2";
	case ATA_UDMA0: return "UDMA16";
	case ATA_UDMA1: return "UDMA25";
	case ATA_UDMA2: return "UDMA33";
	case ATA_UDMA3: return "UDMA40";
	case ATA_UDMA4: return "UDMA66";
	case ATA_UDMA5: return "UDMA100";
	case ATA_UDMA6: return "UDMA133";
	case ATA_SA150: return "SATA150";
	case ATA_SA300: return "SATA300";
	case ATA_SA600: return "SATA600";
	default:
		if (mode & ATA_DMA_MASK)
			return "BIOSDMA";
		else
			return "BIOSPIO";
	}
}

static int
ata_str2mode(const char *str)
{

	if (!strcasecmp(str, "PIO0")) return (ATA_PIO0);
	if (!strcasecmp(str, "PIO1")) return (ATA_PIO1);
	if (!strcasecmp(str, "PIO2")) return (ATA_PIO2);
	if (!strcasecmp(str, "PIO3")) return (ATA_PIO3);
	if (!strcasecmp(str, "PIO4")) return (ATA_PIO4);
	if (!strcasecmp(str, "WDMA0")) return (ATA_WDMA0);
	if (!strcasecmp(str, "WDMA1")) return (ATA_WDMA1);
	if (!strcasecmp(str, "WDMA2")) return (ATA_WDMA2);
	if (!strcasecmp(str, "UDMA0")) return (ATA_UDMA0);
	if (!strcasecmp(str, "UDMA16")) return (ATA_UDMA0);
	if (!strcasecmp(str, "UDMA1")) return (ATA_UDMA1);
	if (!strcasecmp(str, "UDMA25")) return (ATA_UDMA1);
	if (!strcasecmp(str, "UDMA2")) return (ATA_UDMA2);
	if (!strcasecmp(str, "UDMA33")) return (ATA_UDMA2);
	if (!strcasecmp(str, "UDMA3")) return (ATA_UDMA3);
	if (!strcasecmp(str, "UDMA44")) return (ATA_UDMA3);
	if (!strcasecmp(str, "UDMA4")) return (ATA_UDMA4);
	if (!strcasecmp(str, "UDMA66")) return (ATA_UDMA4);
	if (!strcasecmp(str, "UDMA5")) return (ATA_UDMA5);
	if (!strcasecmp(str, "UDMA100")) return (ATA_UDMA5);
	if (!strcasecmp(str, "UDMA6")) return (ATA_UDMA6);
	if (!strcasecmp(str, "UDMA133")) return (ATA_UDMA6);
	return (-1);
}

int
ata_atapi(device_t dev, int target)
{
	struct ata_channel *ch = device_get_softc(dev);

	return (ch->devices & (ATA_ATAPI_MASTER << target));
}
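
/*
 * Request timeout handler.  The per-request callout is initialized with
 * ch->state_mtx and CALLOUT_RETURNUNLOCKED (see ata_cam_begin_transaction()
 * below), so this runs with the channel state lock held and must drop it
 * before returning.
 */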
void
ata_timeout(struct ata_request *request)
{
	struct ata_channel *ch;

	ch = device_get_softc(request->parent);
	//request->flags |= ATA_R_DEBUG;
	ATA_DEBUG_RQ(request, "timeout");

	/*
	 * If we have an ATA_ACTIVE request running, we flag the request
	 * ATA_R_TIMEOUT so ata_cam_end_transaction() will handle it correctly.
	 * Also, NULL out the running request so we won't lose the race with
	 * an eventual interrupt arriving late.
	 */
	if (ch->state == ATA_ACTIVE) {
		request->flags |= ATA_R_TIMEOUT;
		if (ch->dma.unload)
			ch->dma.unload(request);
		ch->running = NULL;
		ch->state = ATA_IDLE;
		ata_cam_end_transaction(ch->dev, request);
	}
	mtx_unlock(&ch->state_mtx);
}

static void
ata_cam_begin_transaction(device_t dev, union ccb *ccb)
{
	struct ata_channel *ch = device_get_softc(dev);
	struct ata_request *request;

	request = &ch->request;
	bzero(request, sizeof(*request));

	/* setup request */
	request->dev = NULL;
	request->parent = dev;
	request->unit = ccb->ccb_h.target_id;
	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		request->data = ccb->ataio.data_ptr;
		request->bytecount = ccb->ataio.dxfer_len;
		request->u.ata.command = ccb->ataio.cmd.command;
		request->u.ata.feature = ((uint16_t)ccb->ataio.cmd.features_exp << 8) |
		    (uint16_t)ccb->ataio.cmd.features;
		request->u.ata.count = ((uint16_t)ccb->ataio.cmd.sector_count_exp << 8) |
		    (uint16_t)ccb->ataio.cmd.sector_count;
		if (ccb->ataio.cmd.flags & CAM_ATAIO_48BIT) {
			request->flags |= ATA_R_48BIT;
			request->u.ata.lba =
			    ((uint64_t)ccb->ataio.cmd.lba_high_exp << 40) |
			    ((uint64_t)ccb->ataio.cmd.lba_mid_exp << 32) |
			    ((uint64_t)ccb->ataio.cmd.lba_low_exp << 24);
		} else {
			request->u.ata.lba =
			    ((uint64_t)(ccb->ataio.cmd.device & 0x0f) << 24);
		}
		request->u.ata.lba |= ((uint64_t)ccb->ataio.cmd.lba_high << 16) |
		    ((uint64_t)ccb->ataio.cmd.lba_mid << 8) |
		    (uint64_t)ccb->ataio.cmd.lba_low;
		if (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)
			request->flags |= ATA_R_NEEDRESULT;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    ccb->ataio.cmd.flags & CAM_ATAIO_DMA)
			request->flags |= ATA_R_DMA;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			request->flags |= ATA_R_READ;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			request->flags |= ATA_R_WRITE;
		if (ccb->ataio.cmd.command == ATA_READ_MUL ||
		    ccb->ataio.cmd.command == ATA_READ_MUL48 ||
		    ccb->ataio.cmd.command == ATA_WRITE_MUL ||
		    ccb->ataio.cmd.command == ATA_WRITE_MUL48) {
			request->transfersize = min(request->bytecount,
			    ch->curr[ccb->ccb_h.target_id].bytecount);
		} else
			request->transfersize = min(request->bytecount, 512);
	} else {
		request->data = ccb->csio.data_ptr;
		request->bytecount = ccb->csio.dxfer_len;
		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
		    request->u.atapi.ccb, ccb->csio.cdb_len);
		request->flags |= ATA_R_ATAPI;
		if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
			request->flags |= ATA_R_ATAPI16;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
			request->flags |= ATA_R_DMA;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
			request->flags |= ATA_R_READ;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
			request->flags |= ATA_R_WRITE;
		request->transfersize = min(request->bytecount,
		    ch->curr[ccb->ccb_h.target_id].bytecount);
	}
	request->retries = 0;
	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
	request->ccb = ccb;
	request->flags |= ATA_R_DATA_IN_CCB;

	ch->running = request;
	ch->state = ATA_ACTIVE;
	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
		ch->running = NULL;
		ch->state = ATA_IDLE;
		ata_cam_end_transaction(dev, request);
		return;
	}
}

static void
ata_cam_request_sense(device_t dev, struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb = request->ccb;

	ch->requestsense = 1;

	bzero(request, sizeof(*request));
	request->dev = NULL;
	request->parent = dev;
	request->unit = ccb->ccb_h.target_id;
	request->data = (void *)&ccb->csio.sense_data;
	request->bytecount = ccb->csio.sense_len;
	request->u.atapi.ccb[0] = ATAPI_REQUEST_SENSE;
	request->u.atapi.ccb[4] = ccb->csio.sense_len;
	request->flags |= ATA_R_ATAPI;
	if (ch->curr[ccb->ccb_h.target_id].atapi == 16)
		request->flags |= ATA_R_ATAPI16;
	if (ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
		request->flags |= ATA_R_DMA;
	request->flags |= ATA_R_READ;
	request->transfersize = min(request->bytecount,
	    ch->curr[ccb->ccb_h.target_id].bytecount);
	request->retries = 0;
	request->timeout = (ccb->ccb_h.timeout + 999) / 1000;
	callout_init_mtx(&request->callout, &ch->state_mtx, CALLOUT_RETURNUNLOCKED);
	request->ccb = ccb;

	ch->running = request;
	ch->state = ATA_ACTIVE;
	if (ch->hw.begin_transaction(request) == ATA_OP_FINISHED) {
		ch->running = NULL;
		ch->state = ATA_IDLE;
		ata_cam_end_transaction(dev, request);
		return;
	}
}
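
/*
 * ATAPI autosense path: when a packet command completes with CHECK
 * CONDITION, ata_cam_end_transaction() re-uses the request to issue an
 * internal REQUEST SENSE (ata_cam_request_sense() above) and marks the
 * channel with ch->requestsense; the completion of that request is then
 * routed here, where CAM_AUTOSNS_VALID or CAM_AUTOSENSE_FAIL is set on
 * the original CCB before it is returned to CAM.
 */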
static void
ata_cam_process_sense(device_t dev, struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb = request->ccb;
	int fatalerr = 0;

	ch->requestsense = 0;

	if (request->flags & ATA_R_TIMEOUT)
		fatalerr = 1;
	if ((request->flags & ATA_R_TIMEOUT) == 0 &&
	    (request->status & ATA_S_ERROR) == 0 &&
	    request->result == 0) {
		ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
	} else {
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_AUTOSENSE_FAIL;
	}

	xpt_done(ccb);
	/* Do error recovery if needed. */
	if (fatalerr)
		ata_reinit(dev);
}

static void
ata_cam_end_transaction(device_t dev, struct ata_request *request)
{
	struct ata_channel *ch = device_get_softc(dev);
	union ccb *ccb = request->ccb;
	int fatalerr = 0;

	if (ch->requestsense) {
		ata_cam_process_sense(dev, request);
		return;
	}

	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	if (request->flags & ATA_R_TIMEOUT) {
		xpt_freeze_simq(ch->sim, 1);
		ccb->ccb_h.status &= ~CAM_STATUS_MASK;
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT | CAM_RELEASE_SIMQ;
		fatalerr = 1;
	} else if (request->status & ATA_S_ERROR) {
		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
		} else {
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		}
	} else if (request->result == ERESTART)
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
	else if (request->result != 0)
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
	else
		ccb->ccb_h.status |= CAM_REQ_CMP;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP &&
	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
	    ((request->status & ATA_S_ERROR) ||
	    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT))) {
		struct ata_res *res = &ccb->ataio.res;
		res->status = request->status;
		res->error = request->error;
		res->lba_low = request->u.ata.lba;
		res->lba_mid = request->u.ata.lba >> 8;
		res->lba_high = request->u.ata.lba >> 16;
		res->device = request->u.ata.lba >> 24;
		res->lba_low_exp = request->u.ata.lba >> 24;
		res->lba_mid_exp = request->u.ata.lba >> 32;
		res->lba_high_exp = request->u.ata.lba >> 40;
		res->sector_count = request->u.ata.count;
		res->sector_count_exp = request->u.ata.count >> 8;
	}
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		if (ccb->ccb_h.func_code == XPT_ATA_IO) {
			ccb->ataio.resid =
			    ccb->ataio.dxfer_len - request->donecount;
		} else {
			ccb->csio.resid =
			    ccb->csio.dxfer_len - request->donecount;
		}
	}
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
	    (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)
		ata_cam_request_sense(dev, request);
	else
		xpt_done(ccb);
	/* Do error recovery if needed. */
	if (fatalerr)
		ata_reinit(dev);
}
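
/*
 * ata_check_ids() validates the target/LUN of an incoming CCB: at most
 * two targets (one if ATA_NO_SLAVE is set) and LUN 0 only.  On failure it
 * completes the CCB itself with CAM_TID_INVALID/CAM_LUN_INVALID and
 * returns -1 so the caller can simply return.
 */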
static int
ata_check_ids(device_t dev, union ccb *ccb)
{
	struct ata_channel *ch = device_get_softc(dev);

	if (ccb->ccb_h.target_id > ((ch->flags & ATA_NO_SLAVE) ? 0 : 1)) {
		ccb->ccb_h.status = CAM_TID_INVALID;
		xpt_done(ccb);
		return (-1);
	}
	if (ccb->ccb_h.target_lun != 0) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		xpt_done(ccb);
		return (-1);
	}
	/*
	 * It's a programming error to see AUXILIARY register requests.
	 */
	KASSERT(ccb->ccb_h.func_code != XPT_ATA_IO ||
	    ((ccb->ataio.ata_flags & ATA_FLAG_AUX) == 0),
	    ("AUX register unsupported"));
	return (0);
}

static void
ataaction(struct cam_sim *sim, union ccb *ccb)
{
	device_t dev, parent;
	struct ata_channel *ch;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
	    ccb->ccb_h.func_code));

	ch = (struct ata_channel *)cam_sim_softc(sim);
	dev = ch->dev;
	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ATA_IO:	/* Execute the requested I/O operation */
	case XPT_SCSI_IO:
		if (ata_check_ids(dev, ccb))
			return;
		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
		    << ccb->ccb_h.target_id)) == 0) {
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}
		if (ch->running)
			device_printf(dev, "already running!\n");
		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
			struct ata_res *res = &ccb->ataio.res;

			bzero(res, sizeof(*res));
			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
				res->lba_high = 0;
				res->lba_mid = 0;
			} else {
				res->lba_high = 0xeb;
				res->lba_mid = 0x14;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		ata_cam_begin_transaction(dev, ccb);
		return;
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ata_cam_device *d;

		if (ata_check_ids(dev, ccb))
			return;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		if (ch->flags & ATA_SATA) {
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
				d->revision = cts->xport_specific.sata.revision;
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
					d->mode = ATA_SETMODE(ch->dev,
					    ccb->ccb_h.target_id,
					    cts->xport_specific.sata.mode);
				} else
					d->mode = cts->xport_specific.sata.mode;
			}
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
				d->atapi = cts->xport_specific.sata.atapi;
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
				d->caps = cts->xport_specific.sata.caps;
		} else {
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
					d->mode = ATA_SETMODE(ch->dev,
					    ccb->ccb_h.target_id,
					    cts->xport_specific.ata.mode);
				} else
					d->mode = cts->xport_specific.ata.mode;
			}
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
				d->bytecount = cts->xport_specific.ata.bytecount;
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
				d->atapi = cts->xport_specific.ata.atapi;
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_CAPS)
				d->caps = cts->xport_specific.ata.caps;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
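	/*
	 * XPT_GET_TRAN_SETTINGS reports either the currently negotiated
	 * settings or the user limits, depending on cts->type; for current
	 * settings the advertised capabilities are additionally masked by
	 * the per-device user caps.
	 */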
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ata_cam_device *d;

		if (ata_check_ids(dev, ccb))
			return;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		cts->protocol = PROTO_UNSPECIFIED;
		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
		if (ch->flags & ATA_SATA) {
			cts->transport = XPORT_SATA;
			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
			cts->xport_specific.sata.valid = 0;
			cts->xport_specific.sata.mode = d->mode;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
			cts->xport_specific.sata.bytecount = d->bytecount;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
				cts->xport_specific.sata.revision =
				    ATA_GETREV(dev, ccb->ccb_h.target_id);
				if (cts->xport_specific.sata.revision != 0xff) {
					cts->xport_specific.sata.valid |=
					    CTS_SATA_VALID_REVISION;
				}
				cts->xport_specific.sata.caps =
				    d->caps & CTS_SATA_CAPS_D;
				if (ch->pm_level) {
					cts->xport_specific.sata.caps |=
					    CTS_SATA_CAPS_H_PMREQ;
				}
				cts->xport_specific.sata.caps &=
				    ch->user[ccb->ccb_h.target_id].caps;
			} else {
				cts->xport_specific.sata.revision = d->revision;
				cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
				cts->xport_specific.sata.caps = d->caps;
			}
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
			cts->xport_specific.sata.atapi = d->atapi;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
		} else {
			cts->transport = XPORT_ATA;
			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
			cts->xport_specific.ata.valid = 0;
			cts->xport_specific.ata.mode = d->mode;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
			cts->xport_specific.ata.bytecount = d->bytecount;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
				cts->xport_specific.ata.caps =
				    d->caps & CTS_ATA_CAPS_D;
				if (!(ch->flags & ATA_NO_48BIT_DMA))
					cts->xport_specific.ata.caps |=
					    CTS_ATA_CAPS_H_DMA48;
				cts->xport_specific.ata.caps &=
				    ch->user[ccb->ccb_h.target_id].caps;
			} else
				cts->xport_specific.ata.caps = d->caps;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS;
			cts->xport_specific.ata.atapi = d->atapi;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		ata_reinit(dev);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
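	/*
	 * XPT_PATH_INQ advertises at most two targets (one with
	 * ATA_NO_SLAVE), LUN 0 only, sequential scanning and unmapped I/O
	 * support; the nominal base transfer speed is reported as 150000
	 * (SATA) or 3300 (parallel ATA) KB/s.
	 */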
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		parent = device_get_parent(dev);
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
		cpi->hba_eng_cnt = 0;
		if (ch->flags & ATA_NO_SLAVE)
			cpi->max_target = 0;
		else
			cpi->max_target = 1;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		if (ch->flags & ATA_SATA)
			cpi->base_transfer_speed = 150000;
		else
			cpi->base_transfer_speed = 3300;
		strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strlcpy(cpi->hba_vid, "ATA", HBA_IDLEN);
		strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		if (ch->flags & ATA_SATA)
			cpi->transport = XPORT_SATA;
		else
			cpi->transport = XPORT_ATA;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->protocol = PROTO_ATA;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
		if (device_get_devclass(device_get_parent(parent)) ==
		    devclass_find("pci")) {
			cpi->hba_vendor = pci_get_vendor(parent);
			cpi->hba_device = pci_get_device(parent);
			cpi->hba_subvendor = pci_get_subvendor(parent);
			cpi->hba_subdevice = pci_get_subdevice(parent);
		}
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}

static void
atapoll(struct cam_sim *sim)
{
	struct ata_channel *ch = (struct ata_channel *)cam_sim_softc(sim);

	ata_interrupt_locked(ch);
}

/*
 * module handling
 */
static int
ata_module_event_handler(module_t mod, int what, void *arg)
{

	switch (what) {
	case MOD_LOAD:
		return 0;

	case MOD_UNLOAD:
		return 0;

	default:
		return EOPNOTSUPP;
	}
}

static moduledata_t ata_moduledata = { "ata", ata_module_event_handler, NULL };
DECLARE_MODULE(ata, ata_moduledata, SI_SUB_CONFIGURE, SI_ORDER_SECOND);
MODULE_VERSION(ata, 1);
MODULE_DEPEND(ata, cam, 1, 1, 1);