/*-
 * Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include "ahci.h"

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>

/* local prototypes */
static void ahci_intr(void *data);
static void ahci_intr_one(void *data);
static void ahci_intr_one_edge(void *data);
static int ahci_ch_init(device_t dev);
static int ahci_ch_deinit(device_t dev);
static int ahci_ch_suspend(device_t dev);
static int ahci_ch_resume(device_t dev);
static void ahci_ch_pm(void *arg);
static void ahci_ch_intr(void *arg);
static void ahci_ch_intr_direct(void *arg);
static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus);
static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb);
static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
static void ahci_execute_transaction(struct ahci_slot *slot);
static void ahci_timeout(struct ahci_slot *slot);
static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et);
static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag);
static void ahci_dmainit(device_t dev);
static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static void ahci_dmafini(device_t dev);
static void ahci_slotsalloc(device_t dev);
static void ahci_slotsfree(device_t dev);
static void ahci_reset(struct ahci_channel *ch);
static void ahci_start(struct ahci_channel *ch, int fbs);
static void ahci_stop(struct ahci_channel *ch);
static void ahci_clo(struct ahci_channel *ch);
static void ahci_start_fr(struct ahci_channel *ch);
static void ahci_stop_fr(struct ahci_channel *ch);

static int ahci_sata_connect(struct ahci_channel *ch);
static int ahci_sata_phy_reset(struct ahci_channel *ch);
static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0);

static void ahci_issue_recovery(struct ahci_channel *ch);
static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb);
static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb);

static void ahciaction(struct cam_sim *sim, union ccb *ccb);
static void ahcipoll(struct cam_sim *sim);

static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers");

#define recovery_type		spriv_field0
#define RECOVERY_NONE		0
#define RECOVERY_READ_LOG	1
#define RECOVERY_REQUEST_SENSE	2
#define recovery_slot		spriv_field1

int
ahci_ctlr_setup(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	/* Clear interrupts */
	ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS));
	/* Configure CCC */
	if (ctlr->ccc) {
		ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI));
		ATA_OUTL(ctlr->r_mem, AHCI_CCCC,
		    (ctlr->ccc << AHCI_CCCC_TV_SHIFT) |
		    (4 << AHCI_CCCC_CC_SHIFT) |
		    AHCI_CCCC_EN);
		ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) &
		    AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT;
		if (bootverbose) {
			device_printf(dev,
			    "CCC with %dms/4cmd enabled on vector %d\n",
			    ctlr->ccc, ctlr->cccv);
		}
	}
	/* Enable AHCI interrupts */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC,
	    ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE);
	return (0);
}

int
ahci_ctlr_reset(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int timeout;

	/* Enable AHCI mode */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
	/* Reset AHCI controller */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR);
	for (timeout = 1000; timeout > 0; timeout--) {
		DELAY(1000);
		if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0)
			break;
	}
	if (timeout == 0) {
		device_printf(dev, "AHCI controller reset failure\n");
		return ENXIO;
	}
	/* Reenable AHCI mode */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
	return (0);
}

int
ahci_attach(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int error, i, u, speed, unit;
	u_int32_t version;
	device_t child;

	ctlr->dev = dev;
	ctlr->ccc = 0;
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "ccc", &ctlr->ccc);

	/* Setup our own memory management for channels. */
	ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem);
	ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem);
	ctlr->sc_iomem.rm_type = RMAN_ARRAY;
	ctlr->sc_iomem.rm_descr = "I/O memory addresses";
	if ((error = rman_init(&ctlr->sc_iomem)) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
		return (error);
	}
	if ((error = rman_manage_region(&ctlr->sc_iomem,
	    rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) {
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
		rman_fini(&ctlr->sc_iomem);
		return (error);
	}
	/* Get the HW capabilities */
	version = ATA_INL(ctlr->r_mem, AHCI_VS);
	ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP);
	if (version >= 0x00010200)
		ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2);
	if (ctlr->caps & AHCI_CAP_EMS)
		ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL);
	ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI);

	/* Identify and set separate quirks for HBA and RAID f/w Marvells. */
	if ((ctlr->quirks & AHCI_Q_ALTSIG) &&
	    (ctlr->caps & AHCI_CAP_SPM) == 0)
		ctlr->quirks |= AHCI_Q_NOBSYRES;

	if (ctlr->quirks & AHCI_Q_1CH) {
		ctlr->caps &= ~AHCI_CAP_NPMASK;
		ctlr->ichannels &= 0x01;
	}
	if (ctlr->quirks & AHCI_Q_2CH) {
		ctlr->caps &= ~AHCI_CAP_NPMASK;
		ctlr->caps |= 1;
		ctlr->ichannels &= 0x03;
	}
	if (ctlr->quirks & AHCI_Q_4CH) {
		ctlr->caps &= ~AHCI_CAP_NPMASK;
		ctlr->caps |= 3;
		ctlr->ichannels &= 0x0f;
	}
	ctlr->channels = MAX(flsl(ctlr->ichannels),
	    (ctlr->caps & AHCI_CAP_NPMASK) + 1);
	if (ctlr->quirks & AHCI_Q_NOPMP)
		ctlr->caps &= ~AHCI_CAP_SPM;
	if (ctlr->quirks & AHCI_Q_NONCQ)
		ctlr->caps &= ~AHCI_CAP_SNCQ;
	if ((ctlr->caps & AHCI_CAP_CCCS) == 0)
		ctlr->ccc = 0;
	ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC);

	/* Create controller-wide DMA tag. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 0, 0,
	    (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR :
	    BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE,
	    0, NULL, NULL, &ctlr->dma_tag)) {
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid,
		    ctlr->r_mem);
		rman_fini(&ctlr->sc_iomem);
		return ENXIO;
	}

	ahci_ctlr_setup(dev);

	/* Setup interrupts. */
	if (ahci_setup_interrupt(dev)) {
		bus_dma_tag_destroy(ctlr->dma_tag);
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid,
		    ctlr->r_mem);
		rman_fini(&ctlr->sc_iomem);
		return ENXIO;
	}

	i = 0;
	for (u = ctlr->ichannels; u != 0; u >>= 1)
		i += (u & 1);
	ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3));
	resource_int_value(device_get_name(dev), device_get_unit(dev),
	    "direct", &ctlr->direct);
	/* Announce HW capabilities. */
	speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT;
	device_printf(dev,
	    "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n",
	    ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f),
	    ((version >> 4) & 0xf0) + (version & 0x0f),
	    (ctlr->caps & AHCI_CAP_NPMASK) + 1,
	    ((speed == 1) ? "1.5":((speed == 2) ? "3":
	    ((speed == 3) ? "6":"?"))),
	    (ctlr->caps & AHCI_CAP_SPM) ?
	    "supported" : "not supported",
	    (ctlr->caps & AHCI_CAP_FBSS) ?
	    " with FBS" : "");
	if (ctlr->quirks != 0) {
		device_printf(dev, "quirks=0x%b\n", ctlr->quirks,
		    AHCI_Q_BIT_STRING);
	}
	if (bootverbose) {
		device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps",
		    (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"",
		    (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"",
		    (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"",
		    (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"",
		    (ctlr->caps & AHCI_CAP_SSS) ? " SS":"",
		    (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"",
		    (ctlr->caps & AHCI_CAP_SAL) ? " AL":"",
		    (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"",
		    ((speed == 1) ? "1.5":((speed == 2) ? "3":
		    ((speed == 3) ? "6":"?"))));
		printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n",
		    (ctlr->caps & AHCI_CAP_SAM) ? " AM":"",
		    (ctlr->caps & AHCI_CAP_SPM) ? " PM":"",
		    (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"",
		    (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"",
		    (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"",
		    (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"",
		    ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1,
		    (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"",
		    (ctlr->caps & AHCI_CAP_EMS) ? " EM":"",
		    (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"",
		    (ctlr->caps & AHCI_CAP_NPMASK) + 1);
	}
	if (bootverbose && version >= 0x00010200) {
		device_printf(dev, "Caps2:%s%s%s%s%s%s\n",
		    (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"",
		    (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"",
		    (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"",
		    (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"",
		    (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"",
		    (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":"");
	}
	/* Attach all channels on this controller */
	for (unit = 0; unit < ctlr->channels; unit++) {
		child = device_add_child(dev, "ahcich", -1);
		if (child == NULL) {
			device_printf(dev, "failed to add channel device\n");
			continue;
		}
		device_set_ivars(child, (void *)(intptr_t)unit);
		if ((ctlr->ichannels & (1 << unit)) == 0)
			device_disable(child);
	}
	if (ctlr->caps & AHCI_CAP_EMS) {
		child = device_add_child(dev, "ahciem", -1);
		if (child == NULL)
			device_printf(dev, "failed to add enclosure device\n");
		else
			device_set_ivars(child, (void *)(intptr_t)-1);
	}
	bus_generic_attach(dev);
	return 0;
}

int
ahci_detach(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int i;

	/* Detach & delete all children */
	device_delete_children(dev);

	/* Free interrupts. */
	for (i = 0; i < ctlr->numirqs; i++) {
		if (ctlr->irqs[i].r_irq) {
			bus_teardown_intr(dev, ctlr->irqs[i].r_irq,
			    ctlr->irqs[i].handle);
			bus_release_resource(dev, SYS_RES_IRQ,
			    ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq);
		}
	}
	bus_dma_tag_destroy(ctlr->dma_tag);
	/* Free memory. */
	rman_fini(&ctlr->sc_iomem);
	if (ctlr->r_mem)
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem);
	return (0);
}

int
ahci_setup_interrupt(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int i;

	/* Check for single MSI vector fallback. */
	if (ctlr->numirqs > 1 &&
	    (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) {
		device_printf(dev, "Falling back to one MSI\n");
		ctlr->numirqs = 1;
	}
	/* Allocate all IRQs. */
	for (i = 0; i < ctlr->numirqs; i++) {
		ctlr->irqs[i].ctlr = ctlr;
		ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0);
		if (ctlr->channels == 1 && !ctlr->ccc)
			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
		else if (ctlr->numirqs == 1 || i >= ctlr->channels ||
		    (ctlr->ccc && i == ctlr->cccv))
			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL;
		else if (i == ctlr->numirqs - 1)
			ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER;
		else
			ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE;
		if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) {
			device_printf(dev, "unable to map interrupt\n");
			return ENXIO;
		}
		if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL,
		    (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr :
		    ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge :
		    ahci_intr_one),
		    &ctlr->irqs[i], &ctlr->irqs[i].handle))) {
			/* SOS XXX release r_irq */
			device_printf(dev, "unable to setup interrupt\n");
			return ENXIO;
		}
		if (ctlr->numirqs > 1) {
			bus_describe_intr(dev, ctlr->irqs[i].r_irq,
			    ctlr->irqs[i].handle,
			    ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ?
			    "ch%d" : "%d", i);
		}
	}
	return (0);
}

/*
 * Common case interrupt handler.
 */
static void
ahci_intr(void *data)
{
	struct ahci_controller_irq *irq = data;
	struct ahci_controller *ctlr = irq->ctlr;
	u_int32_t is, ise = 0;
	void *arg;
	int unit;

	if (irq->mode == AHCI_IRQ_MODE_ALL) {
		unit = 0;
		if (ctlr->ccc)
			is = ctlr->ichannels;
		else
			is = ATA_INL(ctlr->r_mem, AHCI_IS);
	} else {	/* AHCI_IRQ_MODE_AFTER */
		unit = irq->r_irq_rid - 1;
		is = ATA_INL(ctlr->r_mem, AHCI_IS);
	}
	/* CCC interrupt is edge triggered. */
	if (ctlr->ccc)
		ise = 1 << ctlr->cccv;
	/* Some controllers have edge triggered IS. */
	if (ctlr->quirks & AHCI_Q_EDGEIS)
		ise |= is;
	if (ise != 0)
		ATA_OUTL(ctlr->r_mem, AHCI_IS, ise);
	for (; unit < ctlr->channels; unit++) {
		if ((is & (1 << unit)) != 0 &&
		    (arg = ctlr->interrupt[unit].argument)) {
			ctlr->interrupt[unit].function(arg);
		}
	}
	/* AHCI declares level triggered IS. */
	if (!(ctlr->quirks & AHCI_Q_EDGEIS))
		ATA_OUTL(ctlr->r_mem, AHCI_IS, is);
}

/*
 * Simplified interrupt handler for multivector MSI mode.
 */
static void
ahci_intr_one(void *data)
{
	struct ahci_controller_irq *irq = data;
	struct ahci_controller *ctlr = irq->ctlr;
	void *arg;
	int unit;

	unit = irq->r_irq_rid - 1;
	if ((arg = ctlr->interrupt[unit].argument))
		ctlr->interrupt[unit].function(arg);
	/* AHCI declares level triggered IS. */
	ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
}

static void
ahci_intr_one_edge(void *data)
{
	struct ahci_controller_irq *irq = data;
	struct ahci_controller *ctlr = irq->ctlr;
	void *arg;
	int unit;

	unit = irq->r_irq_rid - 1;
	/* Some controllers have edge triggered IS. */
	ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit);
	if ((arg = ctlr->interrupt[unit].argument))
		ctlr->interrupt[unit].function(arg);
}

struct resource *
ahci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	struct resource *res;
	long st;
	int offset, size, unit;

	unit = (intptr_t)device_get_ivars(child);
	res = NULL;
	switch (type) {
	case SYS_RES_MEMORY:
		if (unit >= 0) {
			offset = AHCI_OFFSET + (unit << 7);
			size = 128;
		} else if (*rid == 0) {
			offset = AHCI_EM_CTL;
			size = 4;
		} else {
			offset = (ctlr->emloc & 0xffff0000) >> 14;
			size = (ctlr->emloc & 0x0000ffff) << 2;
			if (*rid != 1) {
				if (*rid == 2 && (ctlr->capsem &
				    (AHCI_EM_XMT | AHCI_EM_SMB)) == 0)
					offset += size;
				else
					break;
			}
		}
		st = rman_get_start(ctlr->r_mem);
		res = rman_reserve_resource(&ctlr->sc_iomem, st + offset,
		    st + offset + size - 1, size, RF_ACTIVE, child);
		if (res) {
			bus_space_handle_t bsh;
			bus_space_tag_t bst;
			bsh = rman_get_bushandle(ctlr->r_mem);
			bst = rman_get_bustag(ctlr->r_mem);
			bus_space_subregion(bst, bsh, offset, 128, &bsh);
			rman_set_bushandle(res, bsh);
			rman_set_bustag(res, bst);
		}
		break;
	case SYS_RES_IRQ:
		if (*rid == ATA_IRQ_RID)
			res = ctlr->irqs[0].r_irq;
		break;
	}
	return (res);
}

int
ahci_release_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{

	switch (type) {
	case SYS_RES_MEMORY:
		rman_release_resource(r);
		return (0);
	case SYS_RES_IRQ:
		if (rid != ATA_IRQ_RID)
			return ENOENT;
		return (0);
	}
	return (EINVAL);
}

int
ahci_setup_intr(device_t dev, device_t child, struct resource *irq,
    int flags, driver_filter_t *filter, driver_intr_t *function,
    void *argument, void **cookiep)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int unit = (intptr_t)device_get_ivars(child);

	if (filter != NULL) {
		printf("ahci.c: we cannot use a filter here\n");
		return (EINVAL);
	}
	ctlr->interrupt[unit].function = function;
	ctlr->interrupt[unit].argument = argument;
	return (0);
}

int
ahci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int unit = (intptr_t)device_get_ivars(child);

	ctlr->interrupt[unit].function = NULL;
	ctlr->interrupt[unit].argument = NULL;
	return (0);
}

int
ahci_print_child(device_t dev, device_t child)
{
	int retval, channel;

	retval = bus_print_child_header(dev, child);
	channel = (int)(intptr_t)device_get_ivars(child);
	if (channel >= 0)
		retval += printf(" at channel %d", channel);
	retval += bus_print_child_footer(dev, child);
	return (retval);
}

int
ahci_child_location_str(device_t dev, device_t child, char *buf,
    size_t buflen)
{
	int channel;

	channel = (int)(intptr_t)device_get_ivars(child);
	if (channel >= 0)
		snprintf(buf, buflen, "channel=%d", channel);
	return (0);
}

bus_dma_tag_t
ahci_get_dma_tag(device_t dev, device_t child)
{
	struct ahci_controller *ctlr = device_get_softc(dev);

	return (ctlr->dma_tag);
}

static int
ahci_ch_probe(device_t dev)
{

	device_set_desc_copy(dev, "AHCI channel");
	return (0);
}

static int
ahci_ch_attach(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ahci_channel *ch = device_get_softc(dev);
	struct cam_devq *devq;
	int rid, error, i, sata_rev = 0;
	u_int32_t version;

	ch->dev = dev;
	ch->unit = (intptr_t)device_get_ivars(dev);
	ch->caps = ctlr->caps;
	ch->caps2 = ctlr->caps2;
	ch->quirks = ctlr->quirks;
	ch->vendorid = ctlr->vendorid;
	ch->deviceid = ctlr->deviceid;
	ch->subvendorid = ctlr->subvendorid;
	ch->subdeviceid = ctlr->subdeviceid;
	ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1;
	mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF);
	ch->pm_level = 0;
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "pm_level", &ch->pm_level);
	STAILQ_INIT(&ch->doneq);
	if (ch->pm_level > 3)
		callout_init_mtx(&ch->pm_timer, &ch->mtx, 0);
	callout_init_mtx(&ch->reset_timer, &ch->mtx, 0);
	/* JMicron external ports (0) sometimes limited */
	if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0)
		sata_rev = 1;
	if (ch->quirks & AHCI_Q_SATA2)
		sata_rev = 2;
	resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "sata_rev", &sata_rev);
	for (i = 0; i < 16; i++) {
		ch->user[i].revision = sata_rev;
		ch->user[i].mode = 0;
		ch->user[i].bytecount = 8192;
		ch->user[i].tags = ch->numslots;
		ch->user[i].caps = 0;
		ch->curr[i] = ch->user[i];
		if (ch->pm_level) {
			ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ |
			    CTS_SATA_CAPS_H_APST |
			    CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST;
		}
		ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA |
		    CTS_SATA_CAPS_H_AN;
	}
	rid = 0;
	if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &rid, RF_ACTIVE)))
		return (ENXIO);
	ahci_dmainit(dev);
	ahci_slotsalloc(dev);
	ahci_ch_init(dev);
	mtx_lock(&ch->mtx);
	rid = ATA_IRQ_RID;
	if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &rid, RF_SHAREABLE | RF_ACTIVE))) {
		device_printf(dev, "Unable to map interrupt\n");
		error = ENXIO;
		goto err0;
	}
	if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
	    ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr,
	    ch, &ch->ih))) {
		device_printf(dev, "Unable to setup interrupt\n");
		error = ENXIO;
		goto err1;
	}
	ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD);
	version = ATA_INL(ctlr->r_mem, AHCI_VS);
	if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS))
		ch->chcaps |= AHCI_P_CMD_FBSCP;
	if (ch->caps2 & AHCI_CAP2_SDS)
		ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP);
	if (bootverbose) {
		device_printf(dev, "Caps:%s%s%s%s%s%s\n",
		    (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"",
		    (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"",
		    (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"",
		    (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"",
		    (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"",
		    (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":"");
	}
	/* Create the device queue for our SIM. */
	devq = cam_simq_alloc(ch->numslots);
	if (devq == NULL) {
		device_printf(dev, "Unable to allocate simq\n");
		error = ENOMEM;
		goto err1;
	}
	/* Construct SIM entry */
	ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch,
	    device_get_unit(dev), (struct mtx *)&ch->mtx,
	    min(2, ch->numslots),
	    (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0,
	    devq);
	if (ch->sim == NULL) {
		cam_simq_free(devq);
		device_printf(dev, "unable to allocate sim\n");
		error = ENOMEM;
		goto err1;
	}
	if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
		device_printf(dev, "unable to register xpt bus\n");
		error = ENXIO;
		goto err2;
	}
	if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		device_printf(dev, "unable to create path\n");
		error = ENXIO;
		goto err3;
	}
	if (ch->pm_level > 3) {
		callout_reset(&ch->pm_timer,
		    (ch->pm_level == 4) ? hz / 1000 : hz / 8,
		    ahci_ch_pm, ch);
	}
	mtx_unlock(&ch->mtx);
	return (0);

err3:
	xpt_bus_deregister(cam_sim_path(ch->sim));
err2:
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
err1:
	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
err0:
	bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
	mtx_unlock(&ch->mtx);
	mtx_destroy(&ch->mtx);
	return (error);
}

static int
ahci_ch_detach(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	mtx_lock(&ch->mtx);
	xpt_async(AC_LOST_DEVICE, ch->path, NULL);
	/* Forget about reset. */
	if (ch->resetting) {
		ch->resetting = 0;
		xpt_release_simq(ch->sim, TRUE);
	}
	xpt_free_path(ch->path);
	xpt_bus_deregister(cam_sim_path(ch->sim));
	cam_sim_free(ch->sim, /*free_devq*/TRUE);
	mtx_unlock(&ch->mtx);

	if (ch->pm_level > 3)
		callout_drain(&ch->pm_timer);
	callout_drain(&ch->reset_timer);
	bus_teardown_intr(dev, ch->r_irq, ch->ih);
	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);

	ahci_ch_deinit(dev);
	ahci_slotsfree(dev);
	ahci_dmafini(dev);

	bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
	mtx_destroy(&ch->mtx);
	return (0);
}

static int
ahci_ch_init(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);
	uint64_t work;

	/* Disable port interrupts */
	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
	/* Setup work areas */
	work = ch->dma.work_bus + AHCI_CL_OFFSET;
	ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff);
	ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32);
	work = ch->dma.rfis_bus;
	ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff);
	ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32);
	/* Activate the channel and power/spin up device */
	ATA_OUTL(ch->r_mem, AHCI_P_CMD,
	    (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD |
	     ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) |
	     ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 )));
	ahci_start_fr(ch);
	ahci_start(ch, 1);
	return (0);
}

static int
ahci_ch_deinit(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	/* Disable port interrupts. */
	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
	/* Reset command register. */
	ahci_stop(ch);
	ahci_stop_fr(ch);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0);
	/* Allow everything, including partial and slumber modes. */
	ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0);
	/* Request slumber mode transition and give some time to get there. */
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER);
	DELAY(100);
	/* Disable PHY. */
	ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
	return (0);
}

static int
ahci_ch_suspend(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	mtx_lock(&ch->mtx);
	xpt_freeze_simq(ch->sim, 1);
	/* Forget about reset. */
	if (ch->resetting) {
		ch->resetting = 0;
		callout_stop(&ch->reset_timer);
		xpt_release_simq(ch->sim, TRUE);
	}
	while (ch->oslots)
		msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100);
	ahci_ch_deinit(dev);
	mtx_unlock(&ch->mtx);
	return (0);
}

static int
ahci_ch_resume(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	mtx_lock(&ch->mtx);
	ahci_ch_init(dev);
	ahci_reset(ch);
	xpt_release_simq(ch->sim, TRUE);
	mtx_unlock(&ch->mtx);
	return (0);
}

devclass_t ahcich_devclass;
static device_method_t ahcich_methods[] = {
	DEVMETHOD(device_probe,		ahci_ch_probe),
	DEVMETHOD(device_attach,	ahci_ch_attach),
	DEVMETHOD(device_detach,	ahci_ch_detach),
	DEVMETHOD(device_suspend,	ahci_ch_suspend),
	DEVMETHOD(device_resume,	ahci_ch_resume),
	{ 0, 0 }
};
static driver_t ahcich_driver = {
	"ahcich",
	ahcich_methods,
	sizeof(struct ahci_channel)
};
DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, 0, 0);

struct ahci_dc_cb_args {
	bus_addr_t maddr;
	int error;
};

static void
ahci_dmainit(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);
	struct ahci_dc_cb_args dcba;
	size_t rfsize;

	/* Command area. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE,
	    0, NULL, NULL, &ch->dma.work_tag))
		goto error;
	if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work,
	    BUS_DMA_ZERO, &ch->dma.work_map))
		goto error;
	if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work,
	    AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) {
		bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
		goto error;
	}
	ch->dma.work_bus = dcba.maddr;
	/* FIS receive area. */
	if (ch->chcaps & AHCI_P_CMD_FBSCP)
		rfsize = 4096;
	else
		rfsize = 256;
	if (bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL, rfsize, 1, rfsize,
	    0, NULL, NULL, &ch->dma.rfis_tag))
		goto error;
	if (bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0,
	    &ch->dma.rfis_map))
		goto error;
	if (bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis,
	    rfsize, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) {
		bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
		goto error;
	}
	ch->dma.rfis_bus = dcba.maddr;
	/* Data area. */
	if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
	    NULL, NULL,
	    AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots,
	    AHCI_SG_ENTRIES, AHCI_PRD_MAX,
	    0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) {
		goto error;
	}
	return;

error:
	device_printf(dev, "WARNING - DMA initialization failed\n");
	ahci_dmafini(dev);
}

static void
ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc;

	if (!(dcba->error = error))
		dcba->maddr = segs[0].ds_addr;
}

static void
ahci_dmafini(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);

	if (ch->dma.data_tag) {
		bus_dma_tag_destroy(ch->dma.data_tag);
		ch->dma.data_tag = NULL;
	}
	if (ch->dma.rfis_bus) {
		bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map);
		bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map);
		ch->dma.rfis_bus = 0;
		ch->dma.rfis = NULL;
	}
	if (ch->dma.work_bus) {
		bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map);
		bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map);
		ch->dma.work_bus = 0;
		ch->dma.work = NULL;
	}
	if (ch->dma.work_tag) {
		bus_dma_tag_destroy(ch->dma.work_tag);
		ch->dma.work_tag = NULL;
	}
}

static void
ahci_slotsalloc(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);
	int i;

	/* Alloc and setup command/dma slots */
	bzero(ch->slot, sizeof(ch->slot));
	for (i = 0; i < ch->numslots; i++) {
		struct ahci_slot *slot = &ch->slot[i];

		slot->ch = ch;
		slot->slot = i;
		slot->state = AHCI_SLOT_EMPTY;
		slot->ccb = NULL;
		callout_init_mtx(&slot->timeout, &ch->mtx, 0);

		if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map))
			device_printf(ch->dev, "FAILURE - create data_map\n");
	}
}

static void
ahci_slotsfree(device_t dev)
{
	struct ahci_channel *ch = device_get_softc(dev);
	int i;

	/* Free all dma slots */
	for (i = 0; i < ch->numslots; i++) {
		struct ahci_slot *slot = &ch->slot[i];

		callout_drain(&slot->timeout);
		if (slot->dma.data_map) {
			bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map);
			slot->dma.data_map = NULL;
		}
	}
}

static int
ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr)
{

	if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) ||
	    ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) {
		u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
		union ccb *ccb;

		if (bootverbose) {
			if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
				device_printf(ch->dev, "CONNECT requested\n");
			else
				device_printf(ch->dev, "DISCONNECT requested\n");
		}
		ahci_reset(ch);
		if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
			return (0);
		if (xpt_create_path(&ccb->ccb_h.path, NULL,
		    cam_sim_path(ch->sim),
		    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
			xpt_free_ccb(ccb);
			return (0);
		}
		xpt_rescan(ccb);
		return (1);
	}
	return (0);
}

static void
ahci_cpd_check_events(struct ahci_channel *ch)
{
	u_int32_t status;
	union ccb *ccb;
	device_t dev;

	if (ch->pm_level == 0)
		return;

	status = ATA_INL(ch->r_mem, AHCI_P_CMD);
	if ((status & AHCI_P_CMD_CPD) == 0)
		return;

	if (bootverbose) {
		dev = ch->dev;
		if (status & AHCI_P_CMD_CPS) {
			device_printf(dev, "COLD CONNECT requested\n");
		} else
			device_printf(dev, "COLD DISCONNECT requested\n");
	}
	ahci_reset(ch);
	if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
		return;
	if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim),
	    CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
		xpt_free_ccb(ccb);
		return;
	}
	xpt_rescan(ccb);
}

static void
ahci_notify_events(struct ahci_channel *ch, u_int32_t status)
{
	struct cam_path *dpath;
	int i;

	if (ch->caps & AHCI_CAP_SSNTF)
		ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status);
	if (bootverbose)
		device_printf(ch->dev, "SNTF 0x%04x\n", status);
	for (i = 0; i < 16; i++) {
		if ((status & (1 << i)) == 0)
			continue;
		if (xpt_create_path(&dpath, NULL,
		    xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) {
			xpt_async(AC_SCSI_AEN, dpath, NULL);
			xpt_free_path(dpath);
		}
	}
}

static void
ahci_done(struct ahci_channel *ch, union ccb *ccb)
{

	mtx_assert(&ch->mtx, MA_OWNED);
	if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 ||
	    ch->batch == 0) {
		xpt_done(ccb);
		return;
	}

	STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe);
}

static void
ahci_ch_intr(void *arg)
{
	struct ahci_channel *ch = (struct ahci_channel *)arg;
	uint32_t istatus;

	/* Read interrupt statuses. */
	istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
	if (istatus == 0)
		return;

	mtx_lock(&ch->mtx);
	ahci_ch_intr_main(ch, istatus);
	mtx_unlock(&ch->mtx);
}

static void
ahci_ch_intr_direct(void *arg)
{
	struct ahci_channel *ch = (struct ahci_channel *)arg;
	struct ccb_hdr *ccb_h;
	uint32_t istatus;

	/* Read interrupt statuses. */
	istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
	if (istatus == 0)
		return;

	mtx_lock(&ch->mtx);
	ch->batch = 1;
	ahci_ch_intr_main(ch, istatus);
	ch->batch = 0;
	mtx_unlock(&ch->mtx);
	while ((ccb_h = STAILQ_FIRST(&ch->doneq)) != NULL) {
		STAILQ_REMOVE_HEAD(&ch->doneq, sim_links.stqe);
		xpt_done_direct((union ccb *)ccb_h);
	}
}

static void
ahci_ch_pm(void *arg)
{
	struct ahci_channel *ch = (struct ahci_channel *)arg;
	uint32_t work;

	if (ch->numrslots != 0)
		return;
	work = ATA_INL(ch->r_mem, AHCI_P_CMD);
	if (ch->pm_level == 4)
		work |= AHCI_P_CMD_PARTIAL;
	else
		work |= AHCI_P_CMD_SLUMBER;
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, work);
}

static void
ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus)
{
	uint32_t cstatus, serr = 0, sntf = 0, ok, err;
	enum ahci_err_type et;
	int i, ccs, port, reset = 0;

	/* Clear interrupt statuses. */
	ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus);
	/* Read command statuses. */
	if (ch->numtslots != 0)
		cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
	else
		cstatus = 0;
	if (ch->numrslots != ch->numtslots)
		cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI);
	/* Read SNTF in one of possible ways. */
	if ((istatus & AHCI_P_IX_SDB) &&
	    (ch->pm_present || ch->curr[0].atapi != 0)) {
		if (ch->caps & AHCI_CAP_SSNTF)
			sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF);
		else if (ch->fbs_enabled) {
			u_int8_t *fis = ch->dma.rfis + 0x58;

			for (i = 0; i < 16; i++) {
				if (fis[1] & 0x80) {
					fis[1] &= 0x7f;
					sntf |= 1 << i;
				}
				fis += 256;
			}
		} else {
			u_int8_t *fis = ch->dma.rfis + 0x58;

			if (fis[1] & 0x80)
				sntf = (1 << (fis[1] & 0x0f));
		}
	}
	/* Process PHY events */
	if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF |
	    AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
		serr = ATA_INL(ch->r_mem, AHCI_P_SERR);
		if (serr) {
			ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr);
			reset = ahci_phy_check_events(ch, serr);
		}
	}
	/* Process cold presence detection events */
	if ((istatus & AHCI_P_IX_CPD) && !reset)
		ahci_cpd_check_events(ch);
	/* Process command errors */
	if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
	    AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
		ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
		    >> AHCI_P_CMD_CCS_SHIFT;
//device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
//    __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
//    serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
		port = -1;
		if (ch->fbs_enabled) {
			uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS);
			if (fbs & AHCI_P_FBS_SDE) {
				port = (fbs & AHCI_P_FBS_DWE)
				    >> AHCI_P_FBS_DWE_SHIFT;
			} else {
				for (i = 0; i < 16; i++) {
					if (ch->numrslotspd[i] == 0)
						continue;
					if (port == -1)
						port = i;
					else if (port != i) {
						port = -2;
						break;
					}
				}
			}
		}
		err = ch->rslots & cstatus;
	} else {
		ccs = 0;
		err = 0;
		port = -1;
	}
	/* Complete all successful commands. */
	ok = ch->rslots & ~cstatus;
	for (i = 0; i < ch->numslots; i++) {
		if ((ok >> i) & 1)
			ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE);
	}
	/* On error, complete the rest of commands with error statuses. */
	if (err) {
		if (ch->frozen) {
			union ccb *fccb = ch->frozen;
			ch->frozen = NULL;
			fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
			if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
				xpt_freeze_devq(fccb->ccb_h.path, 1);
				fccb->ccb_h.status |= CAM_DEV_QFRZN;
			}
			ahci_done(ch, fccb);
		}
		for (i = 0; i < ch->numslots; i++) {
			/* XXX: requests in loading state. */
			if (((err >> i) & 1) == 0)
				continue;
			if (port >= 0 &&
			    ch->slot[i].ccb->ccb_h.target_id != port)
				continue;
			if (istatus & AHCI_P_IX_TFE) {
				if (port != -2) {
					/* Task File Error */
					if (ch->numtslotspd[
					    ch->slot[i].ccb->ccb_h.target_id] == 0) {
						/* Untagged operation. */
						if (i == ccs)
							et = AHCI_ERR_TFE;
						else
							et = AHCI_ERR_INNOCENT;
					} else {
						/* Tagged operation. */
						et = AHCI_ERR_NCQ;
					}
				} else {
					et = AHCI_ERR_TFE;
					ch->fatalerr = 1;
				}
			} else if (istatus & AHCI_P_IX_IF) {
				if (ch->numtslots == 0 && i != ccs && port != -2)
					et = AHCI_ERR_INNOCENT;
				else
					et = AHCI_ERR_SATA;
			} else
				et = AHCI_ERR_INVALID;
			ahci_end_transaction(&ch->slot[i], et);
		}
		/*
		 * We can't reinit port if there are some other
		 * commands active, use resume to complete them.
		 */
		if (ch->rslots != 0 && !ch->recoverycmd)
			ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC);
	}
	/* Process NOTIFY events */
	if (sntf)
		ahci_notify_events(ch, sntf);
}

/* Must be called with channel locked. */
static int
ahci_check_collision(struct ahci_channel *ch, union ccb *ccb)
{
	int t = ccb->ccb_h.target_id;

	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		/* Tagged command while we have no supported tag free. */
		if (((~ch->oslots) & (0xffffffff >> (32 -
		    ch->curr[t].tags))) == 0)
			return (1);
		/* If we have FBS */
		if (ch->fbs_enabled) {
			/* Tagged command while untagged are active. */
			if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0)
				return (1);
		} else {
			/* Tagged command while untagged are active. */
			if (ch->numrslots != 0 && ch->numtslots == 0)
				return (1);
			/* Tagged command while tagged to other target is active. */
			if (ch->numtslots != 0 &&
			    ch->taggedtarget != ccb->ccb_h.target_id)
				return (1);
		}
	} else {
		/* If we have FBS */
		if (ch->fbs_enabled) {
			/* Untagged command while tagged are active. */
			if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0)
				return (1);
		} else {
			/* Untagged command while tagged are active. */
			if (ch->numrslots != 0 && ch->numtslots != 0)
				return (1);
		}
	}
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) {
		/* Atomic command while anything active. */
		if (ch->numrslots != 0)
			return (1);
	}
	/* We have some atomic command running. */
	if (ch->aslots != 0)
		return (1);
	return (0);
}

/* Must be called with channel locked. */
static void
ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb)
{
	struct ahci_slot *slot;
	int tag, tags;

	/* Choose empty slot. */
	tags = ch->numslots;
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA))
		tags = ch->curr[ccb->ccb_h.target_id].tags;
	if (ch->lastslot + 1 < tags)
		tag = ffs(~(ch->oslots >> (ch->lastslot + 1)));
	else
		tag = 0;
	if (tag == 0 || tag + ch->lastslot >= tags)
		tag = ffs(~ch->oslots) - 1;
	else
		tag += ch->lastslot;
	ch->lastslot = tag;
	/* Occupy chosen slot. */
	slot = &ch->slot[tag];
	slot->ccb = ccb;
	/* Stop PM timer. */
	if (ch->numrslots == 0 && ch->pm_level > 3)
		callout_stop(&ch->pm_timer);
	/* Update channel stats. */
	ch->oslots |= (1 << tag);
	ch->numrslots++;
	ch->numrslotspd[ccb->ccb_h.target_id]++;
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ch->numtslots++;
		ch->numtslotspd[ccb->ccb_h.target_id]++;
		ch->taggedtarget = ccb->ccb_h.target_id;
	}
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)))
		ch->aslots |= (1 << tag);
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		slot->state = AHCI_SLOT_LOADING;
		bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb,
		    ahci_dmasetprd, slot, 0);
	} else {
		slot->dma.nsegs = 0;
		ahci_execute_transaction(slot);
	}
}

/* Locked by busdma engine. */
static void
ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ahci_slot *slot = arg;
	struct ahci_channel *ch = slot->ch;
	struct ahci_cmd_tab *ctp;
	struct ahci_dma_prd *prd;
	int i;

	if (error) {
		device_printf(ch->dev, "DMA load error\n");
		ahci_end_transaction(slot, AHCI_ERR_INVALID);
		return;
	}
	KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n"));
	/* Get a piece of the workspace for this request */
	ctp = (struct ahci_cmd_tab *)
	    (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot));
	/* Fill S/G table */
	prd = &ctp->prd_tab[0];
	for (i = 0; i < nsegs; i++) {
		prd[i].dba = htole64(segs[i].ds_addr);
		prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK);
	}
	slot->dma.nsegs = nsegs;
	bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
	    ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
	ahci_execute_transaction(slot);
}

/* Must be called with channel locked. */
static void
ahci_execute_transaction(struct ahci_slot *slot)
{
	struct ahci_channel *ch = slot->ch;
	struct ahci_cmd_tab *ctp;
	struct ahci_cmd_list *clp;
	union ccb *ccb = slot->ccb;
	int port = ccb->ccb_h.target_id & 0x0f;
	int fis_size, i, softreset;
	uint8_t *fis = ch->dma.rfis + 0x40;
	uint8_t val;

	/* Get a piece of the workspace for this request */
	ctp = (struct ahci_cmd_tab *)
	    (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot));
	/* Setup the FIS for this request */
	if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) {
		device_printf(ch->dev, "Setting up SATA FIS failed\n");
		ahci_end_transaction(slot, AHCI_ERR_INVALID);
		return;
	}
	/* Setup the command list entry */
	clp = (struct ahci_cmd_list *)
	    (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
	clp->cmd_flags = htole16(
	    (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) |
	    (ccb->ccb_h.func_code == XPT_SCSI_IO ?
	     (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) |
	    (fis_size / sizeof(u_int32_t)) |
	    (port << 12));
	clp->prd_length = htole16(slot->dma.nsegs);
	/* Special handling for Soft Reset command. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
		if (ccb->ataio.cmd.control & ATA_A_RESET) {
			softreset = 1;
			/* Kick controller into sane state */
			ahci_stop(ch);
			ahci_clo(ch);
			ahci_start(ch, 0);
			clp->cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
		} else {
			softreset = 2;
			/* Prepare FIS receive area for check. */
			for (i = 0; i < 20; i++)
				fis[i] = 0xff;
		}
	} else
		softreset = 0;
	clp->bytecount = 0;
	clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET +
	    (AHCI_CT_SIZE * slot->slot));
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
	    BUS_DMASYNC_PREREAD);
	/* Set ACTIVE bit for NCQ commands. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot);
	}
	/* If FBS is enabled, set PMP port. */
	if (ch->fbs_enabled) {
		ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN |
		    (port << AHCI_P_FBS_DEV_SHIFT));
	}
	/* Issue command to the controller. */
	slot->state = AHCI_SLOT_RUNNING;
	ch->rslots |= (1 << slot->slot);
	ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
	/* Device reset commands don't interrupt. Poll them. */
	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
	    (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
		int count, timeout = ccb->ccb_h.timeout * 100;
		enum ahci_err_type et = AHCI_ERR_NONE;

		for (count = 0; count < timeout; count++) {
			DELAY(10);
			if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)))
				break;
			if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
			    softreset != 1) {
#if 0
				device_printf(ch->dev,
				    "Poll error on slot %d, TFD: %04x\n",
				    slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD));
#endif
				et = AHCI_ERR_TFE;
				break;
			}
			/* Workaround for ATI SB600/SB700 chipsets. */
			if (ccb->ccb_h.target_id == 15 &&
			    (ch->quirks & AHCI_Q_ATI_PMP_BUG) &&
			    (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) {
				et = AHCI_ERR_TIMEOUT;
				break;
			}
		}

		/*
		 * Marvell HBAs with non-RAID firmware do not wait for
		 * readiness after soft reset, so we have to wait here.
		 * Marvell RAIDs do not have this problem, but instead
		 * sometimes forget to update FIS receive area, breaking
		 * this wait.
		 */
		if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 &&
		    (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 &&
		    softreset == 2 && et == AHCI_ERR_NONE) {
			while ((val = fis[2]) & ATA_S_BUSY) {
				DELAY(10);
				if (count++ >= timeout)
					break;
			}
		}

		if (timeout && (count >= timeout)) {
			device_printf(ch->dev, "Poll timeout on slot %d port %d\n",
			    slot->slot, port);
			device_printf(ch->dev, "is %08x cs %08x ss %08x "
			    "rs %08x tfd %02x serr %08x cmd %08x\n",
			    ATA_INL(ch->r_mem, AHCI_P_IS),
			    ATA_INL(ch->r_mem, AHCI_P_CI),
			    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
			    ATA_INL(ch->r_mem, AHCI_P_TFD),
			    ATA_INL(ch->r_mem, AHCI_P_SERR),
			    ATA_INL(ch->r_mem, AHCI_P_CMD));
			et = AHCI_ERR_TIMEOUT;
		}

		/* Kick controller into sane state and enable FBS. */
		if (softreset == 2)
			ch->eslots |= (1 << slot->slot);
		ahci_end_transaction(slot, et);
		return;
	}
	/* Start command execution timeout */
	callout_reset(&slot->timeout, (int)ccb->ccb_h.timeout * hz / 2000,
	    (timeout_t*)ahci_timeout, slot);
	return;
}

/* Must be called with channel locked. */
static void
ahci_process_timeout(struct ahci_channel *ch)
{
	int i;

	mtx_assert(&ch->mtx, MA_OWNED);
	/* Handle the rest of commands. */
	for (i = 0; i < ch->numslots; i++) {
		/* Do we have a running request on slot? */
		if (ch->slot[i].state < AHCI_SLOT_RUNNING)
			continue;
		ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT);
	}
}

/* Must be called with channel locked. */
static void
ahci_rearm_timeout(struct ahci_channel *ch)
{
	int i;

	mtx_assert(&ch->mtx, MA_OWNED);
	for (i = 0; i < ch->numslots; i++) {
		struct ahci_slot *slot = &ch->slot[i];

		/* Do we have a running request on slot? */
		if (slot->state < AHCI_SLOT_RUNNING)
			continue;
		if ((ch->toslots & (1 << i)) == 0)
			continue;
		callout_reset(&slot->timeout,
		    (int)slot->ccb->ccb_h.timeout * hz / 2000,
		    (timeout_t*)ahci_timeout, slot);
	}
}

/* Locked by callout mechanism. */
static void
ahci_timeout(struct ahci_slot *slot)
{
	struct ahci_channel *ch = slot->ch;
	device_t dev = ch->dev;
	uint32_t sstatus;
	int ccs;
	int i;

	/* Check for stale timeout. */
	if (slot->state < AHCI_SLOT_RUNNING)
		return;

	/* Check if slot was not being executed last time we checked. */
	if (slot->state < AHCI_SLOT_EXECUTING) {
		/* Check if slot started executing. */
		sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
		ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
		    >> AHCI_P_CMD_CCS_SHIFT;
		if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot ||
		    ch->fbs_enabled || ch->wrongccs)
			slot->state = AHCI_SLOT_EXECUTING;
		else if ((ch->rslots & (1 << ccs)) == 0) {
			ch->wrongccs = 1;
			slot->state = AHCI_SLOT_EXECUTING;
		}

		callout_reset(&slot->timeout,
		    (int)slot->ccb->ccb_h.timeout * hz / 2000,
		    (timeout_t*)ahci_timeout, slot);
		return;
	}

	device_printf(dev, "Timeout on slot %d port %d\n",
	    slot->slot, slot->ccb->ccb_h.target_id & 0x0f);
	device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x "
	    "serr %08x cmd %08x\n",
	    ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI),
	    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
	    ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR),
	    ATA_INL(ch->r_mem, AHCI_P_CMD));

	/* Handle frozen command. */
	if (ch->frozen) {
		union ccb *fccb = ch->frozen;
		ch->frozen = NULL;
		fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
		if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
			xpt_freeze_devq(fccb->ccb_h.path, 1);
			fccb->ccb_h.status |= CAM_DEV_QFRZN;
		}
		ahci_done(ch, fccb);
	}
	if (!ch->fbs_enabled && !ch->wrongccs) {
		/* Without FBS we know real timeout source. */
		ch->fatalerr = 1;
		/* Handle command with timeout. */
		ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT);
		/* Handle the rest of commands. */
		for (i = 0; i < ch->numslots; i++) {
			/* Do we have a running request on slot? */
			if (ch->slot[i].state < AHCI_SLOT_RUNNING)
				continue;
			ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
		}
	} else {
		/* With FBS we wait for other commands timeout and pray. */
		if (ch->toslots == 0)
			xpt_freeze_simq(ch->sim, 1);
		ch->toslots |= (1 << slot->slot);
		if ((ch->rslots & ~ch->toslots) == 0)
			ahci_process_timeout(ch);
		else
			device_printf(dev, " ... waiting for slots %08x\n",
			    ch->rslots & ~ch->toslots);
	}
}

/* Must be called with channel locked. */
static void
ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
{
	struct ahci_channel *ch = slot->ch;
	union ccb *ccb = slot->ccb;
	struct ahci_cmd_list *clp;
	int lastto;
	uint32_t sig;

	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	clp = (struct ahci_cmd_list *)
	    (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
	/* Read result registers to the result struct
	 * May be incorrect if several commands finished same time,
	 * so read only when sure or have to.
	 */
	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		struct ata_res *res = &ccb->ataio.res;

		if ((et == AHCI_ERR_TFE) ||
		    (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) {
			u_int8_t *fis = ch->dma.rfis + 0x40;

			bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
			    BUS_DMASYNC_POSTREAD);
			if (ch->fbs_enabled) {
				fis += ccb->ccb_h.target_id * 256;
				res->status = fis[2];
				res->error = fis[3];
			} else {
				uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD);

				res->status = tfd;
				res->error = tfd >> 8;
			}
			res->lba_low = fis[4];
			res->lba_mid = fis[5];
			res->lba_high = fis[6];
			res->device = fis[7];
			res->lba_low_exp = fis[8];
			res->lba_mid_exp = fis[9];
			res->lba_high_exp = fis[10];
			res->sector_count = fis[12];
			res->sector_count_exp = fis[13];

			/*
			 * Some weird controllers do not return signature in
			 * FIS receive area. Read it from PxSIG register.
			 */
			if ((ch->quirks & AHCI_Q_ALTSIG) &&
			    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
			    (ccb->ataio.cmd.control & ATA_A_RESET) == 0) {
				sig = ATA_INL(ch->r_mem, AHCI_P_SIG);
				res->lba_high = sig >> 24;
				res->lba_mid = sig >> 16;
				res->lba_low = sig >> 8;
				res->sector_count = sig;
			}
		} else
			bzero(res, sizeof(*res));
		if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 &&
		    (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
			ccb->ataio.resid =
			    ccb->ataio.dxfer_len - le32toh(clp->bytecount);
		}
	} else {
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    (ch->quirks & AHCI_Q_NOCOUNT) == 0) {
			ccb->csio.resid =
			    ccb->csio.dxfer_len - le32toh(clp->bytecount);
		}
	}
	if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
		bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
		    (ccb->ccb_h.flags & CAM_DIR_IN) ?
		    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map);
	}
	if (et != AHCI_ERR_NONE)
		ch->eslots |= (1 << slot->slot);
	/* In case of error, freeze device for proper recovery. */
	if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) &&
	    !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
		xpt_freeze_devq(ccb->ccb_h.path, 1);
		ccb->ccb_h.status |= CAM_DEV_QFRZN;
	}
	/* Set proper result status. */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	switch (et) {
	case AHCI_ERR_NONE:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
			ccb->csio.scsi_status = SCSI_STATUS_OK;
		break;
	case AHCI_ERR_INVALID:
		ch->fatalerr = 1;
		ccb->ccb_h.status |= CAM_REQ_INVALID;
		break;
	case AHCI_ERR_INNOCENT:
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	case AHCI_ERR_TFE:
	case AHCI_ERR_NCQ:
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		} else {
			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
		}
		break;
	case AHCI_ERR_SATA:
		ch->fatalerr = 1;
		if (!ch->recoverycmd) {
			xpt_freeze_simq(ch->sim, 1);
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}
		ccb->ccb_h.status |= CAM_UNCOR_PARITY;
		break;
	case AHCI_ERR_TIMEOUT:
		if (!ch->recoverycmd) {
			xpt_freeze_simq(ch->sim, 1);
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		break;
	default:
		ch->fatalerr = 1;
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
	}
	/* Free slot. */
	ch->oslots &= ~(1 << slot->slot);
	ch->rslots &= ~(1 << slot->slot);
	ch->aslots &= ~(1 << slot->slot);
	slot->state = AHCI_SLOT_EMPTY;
	slot->ccb = NULL;
	/* Update channel stats. */
	ch->numrslots--;
	ch->numrslotspd[ccb->ccb_h.target_id]--;
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ch->numtslots--;
		ch->numtslotspd[ccb->ccb_h.target_id]--;
	}
	/* Cancel timeout state if request completed normally. */
	if (et != AHCI_ERR_TIMEOUT) {
		lastto = (ch->toslots == (1 << slot->slot));
		ch->toslots &= ~(1 << slot->slot);
		if (lastto)
			xpt_release_simq(ch->sim, TRUE);
	}
	/* If it was first request of reset sequence and there is no error,
	 * proceed to second request. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
	    (ccb->ataio.cmd.control & ATA_A_RESET) &&
	    et == AHCI_ERR_NONE) {
		ccb->ataio.cmd.control &= ~ATA_A_RESET;
		ahci_begin_transaction(ch, ccb);
		return;
	}
	/* If it was our READ LOG command - process it. */
	if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) {
		ahci_process_read_log(ch, ccb);
	/* If it was our REQUEST SENSE command - process it. */
	} else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) {
		ahci_process_request_sense(ch, ccb);
	/* If it was NCQ or ATAPI command error, put result on hold. */
	} else if (et == AHCI_ERR_NCQ ||
	    ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
	     (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) {
		ch->hold[slot->slot] = ccb;
		ch->numhslots++;
	} else
		ahci_done(ch, ccb);
	/* If we have no other active commands, ... */
	if (ch->rslots == 0) {
		/* if there was fatal error - reset port. */
		if (ch->toslots != 0 || ch->fatalerr) {
			ahci_reset(ch);
		} else {
			/* if we have slots in error, we can reinit port. */
*/ 1886 if (ch->eslots != 0) { 1887 ahci_stop(ch); 1888 ahci_clo(ch); 1889 ahci_start(ch, 1); 1890 } 1891 /* if there commands on hold, we can do READ LOG. */ 1892 if (!ch->recoverycmd && ch->numhslots) 1893 ahci_issue_recovery(ch); 1894 } 1895 /* If all the rest of commands are in timeout - give them chance. */ 1896 } else if ((ch->rslots & ~ch->toslots) == 0 && 1897 et != AHCI_ERR_TIMEOUT) 1898 ahci_rearm_timeout(ch); 1899 /* Unfreeze frozen command. */ 1900 if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) { 1901 union ccb *fccb = ch->frozen; 1902 ch->frozen = NULL; 1903 ahci_begin_transaction(ch, fccb); 1904 xpt_release_simq(ch->sim, TRUE); 1905 } 1906 /* Start PM timer. */ 1907 if (ch->numrslots == 0 && ch->pm_level > 3 && 1908 (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { 1909 callout_schedule(&ch->pm_timer, 1910 (ch->pm_level == 4) ? hz / 1000 : hz / 8); 1911 } 1912 } 1913 1914 static void 1915 ahci_issue_recovery(struct ahci_channel *ch) 1916 { 1917 union ccb *ccb; 1918 struct ccb_ataio *ataio; 1919 struct ccb_scsiio *csio; 1920 int i; 1921 1922 /* Find some held command. */ 1923 for (i = 0; i < ch->numslots; i++) { 1924 if (ch->hold[i]) 1925 break; 1926 } 1927 ccb = xpt_alloc_ccb_nowait(); 1928 if (ccb == NULL) { 1929 device_printf(ch->dev, "Unable to allocate recovery command\n"); 1930 completeall: 1931 /* We can't do anything -- complete held commands. */ 1932 for (i = 0; i < ch->numslots; i++) { 1933 if (ch->hold[i] == NULL) 1934 continue; 1935 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 1936 ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; 1937 ahci_done(ch, ch->hold[i]); 1938 ch->hold[i] = NULL; 1939 ch->numhslots--; 1940 } 1941 ahci_reset(ch); 1942 return; 1943 } 1944 ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ 1945 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1946 /* READ LOG */ 1947 ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; 1948 ccb->ccb_h.func_code = XPT_ATA_IO; 1949 ccb->ccb_h.flags = CAM_DIR_IN; 1950 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ 1951 ataio = &ccb->ataio; 1952 ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); 1953 if (ataio->data_ptr == NULL) { 1954 xpt_free_ccb(ccb); 1955 device_printf(ch->dev, 1956 "Unable to allocate memory for READ LOG command\n"); 1957 goto completeall; 1958 } 1959 ataio->dxfer_len = 512; 1960 bzero(&ataio->cmd, sizeof(ataio->cmd)); 1961 ataio->cmd.flags = CAM_ATAIO_48BIT; 1962 ataio->cmd.command = 0x2F; /* READ LOG EXT */ 1963 ataio->cmd.sector_count = 1; 1964 ataio->cmd.sector_count_exp = 0; 1965 ataio->cmd.lba_low = 0x10; 1966 ataio->cmd.lba_mid = 0; 1967 ataio->cmd.lba_mid_exp = 0; 1968 } else { 1969 /* REQUEST SENSE */ 1970 ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; 1971 ccb->ccb_h.recovery_slot = i; 1972 ccb->ccb_h.func_code = XPT_SCSI_IO; 1973 ccb->ccb_h.flags = CAM_DIR_IN; 1974 ccb->ccb_h.status = 0; 1975 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ 1976 csio = &ccb->csio; 1977 csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; 1978 csio->dxfer_len = ch->hold[i]->csio.sense_len; 1979 csio->cdb_len = 6; 1980 bzero(&csio->cdb_io, sizeof(csio->cdb_io)); 1981 csio->cdb_io.cdb_bytes[0] = 0x03; 1982 csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; 1983 } 1984 /* Freeze SIM while doing recovery. 
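 * No new commands may start while the internal READ LOG EXT or REQUEST
 * SENSE command is using the port; the SIM queue is released again in
 * ahci_process_read_log() / ahci_process_request_sense().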
*/ 1985 ch->recoverycmd = 1; 1986 xpt_freeze_simq(ch->sim, 1); 1987 ahci_begin_transaction(ch, ccb); 1988 } 1989 1990 static void 1991 ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb) 1992 { 1993 uint8_t *data; 1994 struct ata_res *res; 1995 int i; 1996 1997 ch->recoverycmd = 0; 1998 1999 data = ccb->ataio.data_ptr; 2000 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2001 (data[0] & 0x80) == 0) { 2002 for (i = 0; i < ch->numslots; i++) { 2003 if (!ch->hold[i]) 2004 continue; 2005 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) 2006 continue; 2007 if ((data[0] & 0x1F) == i) { 2008 res = &ch->hold[i]->ataio.res; 2009 res->status = data[2]; 2010 res->error = data[3]; 2011 res->lba_low = data[4]; 2012 res->lba_mid = data[5]; 2013 res->lba_high = data[6]; 2014 res->device = data[7]; 2015 res->lba_low_exp = data[8]; 2016 res->lba_mid_exp = data[9]; 2017 res->lba_high_exp = data[10]; 2018 res->sector_count = data[12]; 2019 res->sector_count_exp = data[13]; 2020 } else { 2021 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2022 ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; 2023 } 2024 ahci_done(ch, ch->hold[i]); 2025 ch->hold[i] = NULL; 2026 ch->numhslots--; 2027 } 2028 } else { 2029 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 2030 device_printf(ch->dev, "Error while READ LOG EXT\n"); 2031 else if ((data[0] & 0x80) == 0) { 2032 device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n"); 2033 } 2034 for (i = 0; i < ch->numslots; i++) { 2035 if (!ch->hold[i]) 2036 continue; 2037 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) 2038 continue; 2039 ahci_done(ch, ch->hold[i]); 2040 ch->hold[i] = NULL; 2041 ch->numhslots--; 2042 } 2043 } 2044 free(ccb->ataio.data_ptr, M_AHCI); 2045 xpt_free_ccb(ccb); 2046 xpt_release_simq(ch->sim, TRUE); 2047 } 2048 2049 static void 2050 ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb) 2051 { 2052 int i; 2053 2054 ch->recoverycmd = 0; 2055 2056 i = ccb->ccb_h.recovery_slot; 2057 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 2058 ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; 2059 } else { 2060 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2061 ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2062 } 2063 ahci_done(ch, ch->hold[i]); 2064 ch->hold[i] = NULL; 2065 ch->numhslots--; 2066 xpt_free_ccb(ccb); 2067 xpt_release_simq(ch->sim, TRUE); 2068 } 2069 2070 static void 2071 ahci_start(struct ahci_channel *ch, int fbs) 2072 { 2073 u_int32_t cmd; 2074 2075 /* Clear SATA error register */ 2076 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); 2077 /* Clear any interrupts pending on this channel */ 2078 ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); 2079 /* Configure FIS-based switching if supported. */ 2080 if (ch->chcaps & AHCI_P_CMD_FBSCP) { 2081 ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; 2082 ATA_OUTL(ch->r_mem, AHCI_P_FBS, 2083 ch->fbs_enabled ? AHCI_P_FBS_EN : 0); 2084 } 2085 /* Start operations on this channel */ 2086 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2087 cmd &= ~AHCI_P_CMD_PMA; 2088 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | 2089 (ch->pm_present ? AHCI_P_CMD_PMA : 0)); 2090 } 2091 2092 static void 2093 ahci_stop(struct ahci_channel *ch) 2094 { 2095 u_int32_t cmd; 2096 int timeout; 2097 2098 /* Kill all activity on this channel */ 2099 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2100 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); 2101 /* Wait for activity stop. 
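 * The HBA clears AHCI_P_CMD_CR only when the command list DMA engine
 * has actually stopped; poll it in 10us steps for up to ~500ms.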
*/ 2102 timeout = 0; 2103 do { 2104 DELAY(10); 2105 if (timeout++ > 50000) { 2106 device_printf(ch->dev, "stopping AHCI engine failed\n"); 2107 break; 2108 } 2109 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR); 2110 ch->eslots = 0; 2111 } 2112 2113 static void 2114 ahci_clo(struct ahci_channel *ch) 2115 { 2116 u_int32_t cmd; 2117 int timeout; 2118 2119 /* Issue Command List Override if supported */ 2120 if (ch->caps & AHCI_CAP_SCLO) { 2121 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2122 cmd |= AHCI_P_CMD_CLO; 2123 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd); 2124 timeout = 0; 2125 do { 2126 DELAY(10); 2127 if (timeout++ > 50000) { 2128 device_printf(ch->dev, "executing CLO failed\n"); 2129 break; 2130 } 2131 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO); 2132 } 2133 } 2134 2135 static void 2136 ahci_stop_fr(struct ahci_channel *ch) 2137 { 2138 u_int32_t cmd; 2139 int timeout; 2140 2141 /* Kill all FIS reception on this channel */ 2142 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2143 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE); 2144 /* Wait for FIS reception stop. */ 2145 timeout = 0; 2146 do { 2147 DELAY(10); 2148 if (timeout++ > 50000) { 2149 device_printf(ch->dev, "stopping AHCI FR engine failed\n"); 2150 break; 2151 } 2152 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR); 2153 } 2154 2155 static void 2156 ahci_start_fr(struct ahci_channel *ch) 2157 { 2158 u_int32_t cmd; 2159 2160 /* Start FIS reception on this channel */ 2161 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2162 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE); 2163 } 2164 2165 static int 2166 ahci_wait_ready(struct ahci_channel *ch, int t, int t0) 2167 { 2168 int timeout = 0; 2169 uint32_t val; 2170 2171 while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) & 2172 (ATA_S_BUSY | ATA_S_DRQ)) { 2173 if (timeout > t) { 2174 if (t != 0) { 2175 device_printf(ch->dev, 2176 "AHCI reset: device not ready after %dms " 2177 "(tfd = %08x)\n", 2178 MAX(t, 0) + t0, val); 2179 } 2180 return (EBUSY); 2181 } 2182 DELAY(1000); 2183 timeout++; 2184 } 2185 if (bootverbose) 2186 device_printf(ch->dev, "AHCI reset: device ready after %dms\n", 2187 timeout + t0); 2188 return (0); 2189 } 2190 2191 static void 2192 ahci_reset_to(void *arg) 2193 { 2194 struct ahci_channel *ch = arg; 2195 2196 if (ch->resetting == 0) 2197 return; 2198 ch->resetting--; 2199 if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0, 2200 (310 - ch->resetting) * 100) == 0) { 2201 ch->resetting = 0; 2202 ahci_start(ch, 1); 2203 xpt_release_simq(ch->sim, TRUE); 2204 return; 2205 } 2206 if (ch->resetting == 0) { 2207 ahci_clo(ch); 2208 ahci_start(ch, 1); 2209 xpt_release_simq(ch->sim, TRUE); 2210 return; 2211 } 2212 callout_schedule(&ch->reset_timer, hz / 10); 2213 } 2214 2215 static void 2216 ahci_reset(struct ahci_channel *ch) 2217 { 2218 struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); 2219 int i; 2220 2221 xpt_freeze_simq(ch->sim, 1); 2222 if (bootverbose) 2223 device_printf(ch->dev, "AHCI reset...\n"); 2224 /* Forget about previous reset. */ 2225 if (ch->resetting) { 2226 ch->resetting = 0; 2227 callout_stop(&ch->reset_timer); 2228 xpt_release_simq(ch->sim, TRUE); 2229 } 2230 /* Requeue freezed command. 
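 * A CCB parked in ch->frozen due to a command collision is completed
 * back to CAM as CAM_REQUEUE_REQ, so it will be retried once the port
 * reset below has finished.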
*/ 2231 if (ch->frozen) { 2232 union ccb *fccb = ch->frozen; 2233 ch->frozen = NULL; 2234 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; 2235 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { 2236 xpt_freeze_devq(fccb->ccb_h.path, 1); 2237 fccb->ccb_h.status |= CAM_DEV_QFRZN; 2238 } 2239 ahci_done(ch, fccb); 2240 } 2241 /* Kill the engine and requeue all running commands. */ 2242 ahci_stop(ch); 2243 for (i = 0; i < ch->numslots; i++) { 2244 /* Do we have a running request on slot? */ 2245 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 2246 continue; 2247 /* XXX; Commands in loading state. */ 2248 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); 2249 } 2250 for (i = 0; i < ch->numslots; i++) { 2251 if (!ch->hold[i]) 2252 continue; 2253 ahci_done(ch, ch->hold[i]); 2254 ch->hold[i] = NULL; 2255 ch->numhslots--; 2256 } 2257 if (ch->toslots != 0) 2258 xpt_release_simq(ch->sim, TRUE); 2259 ch->eslots = 0; 2260 ch->toslots = 0; 2261 ch->wrongccs = 0; 2262 ch->fatalerr = 0; 2263 /* Tell the XPT about the event */ 2264 xpt_async(AC_BUS_RESET, ch->path, NULL); 2265 /* Disable port interrupts */ 2266 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 2267 /* Reset and reconnect PHY, */ 2268 if (!ahci_sata_phy_reset(ch)) { 2269 if (bootverbose) 2270 device_printf(ch->dev, 2271 "AHCI reset: device not found\n"); 2272 ch->devices = 0; 2273 /* Enable wanted port interrupts */ 2274 ATA_OUTL(ch->r_mem, AHCI_P_IE, 2275 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | 2276 AHCI_P_IX_PRC | AHCI_P_IX_PC)); 2277 xpt_release_simq(ch->sim, TRUE); 2278 return; 2279 } 2280 if (bootverbose) 2281 device_printf(ch->dev, "AHCI reset: device found\n"); 2282 /* Wait for clearing busy status. */ 2283 if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) { 2284 if (dumping) 2285 ahci_clo(ch); 2286 else 2287 ch->resetting = 310; 2288 } 2289 ch->devices = 1; 2290 /* Enable wanted port interrupts */ 2291 ATA_OUTL(ch->r_mem, AHCI_P_IE, 2292 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | 2293 AHCI_P_IX_TFE | AHCI_P_IX_HBF | 2294 AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF | 2295 ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC | 2296 AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) | 2297 AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR))); 2298 if (ch->resetting) 2299 callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch); 2300 else { 2301 ahci_start(ch, 1); 2302 xpt_release_simq(ch->sim, TRUE); 2303 } 2304 } 2305 2306 static int 2307 ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag) 2308 { 2309 u_int8_t *fis = &ctp->cfis[0]; 2310 2311 bzero(fis, 20); 2312 fis[0] = 0x27; /* host to device */ 2313 fis[1] = (ccb->ccb_h.target_id & 0x0f); 2314 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 2315 fis[1] |= 0x80; 2316 fis[2] = ATA_PACKET_CMD; 2317 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 2318 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) 2319 fis[3] = ATA_F_DMA; 2320 else { 2321 fis[5] = ccb->csio.dxfer_len; 2322 fis[6] = ccb->csio.dxfer_len >> 8; 2323 } 2324 fis[7] = ATA_D_LBA; 2325 fis[15] = ATA_A_4BIT; 2326 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
2327 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, 2328 ctp->acmd, ccb->csio.cdb_len); 2329 bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len); 2330 } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { 2331 fis[1] |= 0x80; 2332 fis[2] = ccb->ataio.cmd.command; 2333 fis[3] = ccb->ataio.cmd.features; 2334 fis[4] = ccb->ataio.cmd.lba_low; 2335 fis[5] = ccb->ataio.cmd.lba_mid; 2336 fis[6] = ccb->ataio.cmd.lba_high; 2337 fis[7] = ccb->ataio.cmd.device; 2338 fis[8] = ccb->ataio.cmd.lba_low_exp; 2339 fis[9] = ccb->ataio.cmd.lba_mid_exp; 2340 fis[10] = ccb->ataio.cmd.lba_high_exp; 2341 fis[11] = ccb->ataio.cmd.features_exp; 2342 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { 2343 fis[12] = tag << 3; 2344 fis[13] = 0; 2345 } else { 2346 fis[12] = ccb->ataio.cmd.sector_count; 2347 fis[13] = ccb->ataio.cmd.sector_count_exp; 2348 } 2349 fis[15] = ATA_A_4BIT; 2350 } else { 2351 fis[15] = ccb->ataio.cmd.control; 2352 } 2353 return (20); 2354 } 2355 2356 static int 2357 ahci_sata_connect(struct ahci_channel *ch) 2358 { 2359 u_int32_t status; 2360 int timeout, found = 0; 2361 2362 /* Wait up to 100ms for "connect well" */ 2363 for (timeout = 0; timeout < 1000 ; timeout++) { 2364 status = ATA_INL(ch->r_mem, AHCI_P_SSTS); 2365 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) 2366 found = 1; 2367 if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && 2368 ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && 2369 ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) 2370 break; 2371 if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { 2372 if (bootverbose) { 2373 device_printf(ch->dev, "SATA offline status=%08x\n", 2374 status); 2375 } 2376 return (0); 2377 } 2378 if (found == 0 && timeout >= 100) 2379 break; 2380 DELAY(100); 2381 } 2382 if (timeout >= 1000 || !found) { 2383 if (bootverbose) { 2384 device_printf(ch->dev, 2385 "SATA connect timeout time=%dus status=%08x\n", 2386 timeout * 100, status); 2387 } 2388 return (0); 2389 } 2390 if (bootverbose) { 2391 device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", 2392 timeout * 100, status); 2393 } 2394 /* Clear SATA error register */ 2395 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); 2396 return (1); 2397 } 2398 2399 static int 2400 ahci_sata_phy_reset(struct ahci_channel *ch) 2401 { 2402 int sata_rev; 2403 uint32_t val; 2404 2405 if (ch->listening) { 2406 val = ATA_INL(ch->r_mem, AHCI_P_CMD); 2407 val |= AHCI_P_CMD_SUD; 2408 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); 2409 ch->listening = 0; 2410 } 2411 sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; 2412 if (sata_rev == 1) 2413 val = ATA_SC_SPD_SPEED_GEN1; 2414 else if (sata_rev == 2) 2415 val = ATA_SC_SPD_SPEED_GEN2; 2416 else if (sata_rev == 3) 2417 val = ATA_SC_SPD_SPEED_GEN3; 2418 else 2419 val = 0; 2420 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 2421 ATA_SC_DET_RESET | val | 2422 ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); 2423 DELAY(1000); 2424 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 2425 ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 
0 : 2426 (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); 2427 if (!ahci_sata_connect(ch)) { 2428 if (ch->caps & AHCI_CAP_SSS) { 2429 val = ATA_INL(ch->r_mem, AHCI_P_CMD); 2430 val &= ~AHCI_P_CMD_SUD; 2431 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); 2432 ch->listening = 1; 2433 } else if (ch->pm_level > 0) 2434 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); 2435 return (0); 2436 } 2437 return (1); 2438 } 2439 2440 static int 2441 ahci_check_ids(struct ahci_channel *ch, union ccb *ccb) 2442 { 2443 2444 if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) { 2445 ccb->ccb_h.status = CAM_TID_INVALID; 2446 ahci_done(ch, ccb); 2447 return (-1); 2448 } 2449 if (ccb->ccb_h.target_lun != 0) { 2450 ccb->ccb_h.status = CAM_LUN_INVALID; 2451 ahci_done(ch, ccb); 2452 return (-1); 2453 } 2454 return (0); 2455 } 2456 2457 static void 2458 ahciaction(struct cam_sim *sim, union ccb *ccb) 2459 { 2460 struct ahci_channel *ch; 2461 2462 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", 2463 ccb->ccb_h.func_code)); 2464 2465 ch = (struct ahci_channel *)cam_sim_softc(sim); 2466 switch (ccb->ccb_h.func_code) { 2467 /* Common cases first */ 2468 case XPT_ATA_IO: /* Execute the requested I/O operation */ 2469 case XPT_SCSI_IO: 2470 if (ahci_check_ids(ch, ccb)) 2471 return; 2472 if (ch->devices == 0 || 2473 (ch->pm_present == 0 && 2474 ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { 2475 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 2476 break; 2477 } 2478 ccb->ccb_h.recovery_type = RECOVERY_NONE; 2479 /* Check for command collision. */ 2480 if (ahci_check_collision(ch, ccb)) { 2481 /* Freeze command. */ 2482 ch->frozen = ccb; 2483 /* We have only one frozen slot, so freeze simq also. */ 2484 xpt_freeze_simq(ch->sim, 1); 2485 return; 2486 } 2487 ahci_begin_transaction(ch, ccb); 2488 return; 2489 case XPT_EN_LUN: /* Enable LUN as a target */ 2490 case XPT_TARGET_IO: /* Execute target I/O request */ 2491 case XPT_ACCEPT_TARGET_IO: /* Accept Host Target Mode CDB */ 2492 case XPT_CONT_TARGET_IO: /* Continue Host Target I/O Connection*/ 2493 case XPT_ABORT: /* Abort the specified CCB */ 2494 /* XXX Implement */ 2495 ccb->ccb_h.status = CAM_REQ_INVALID; 2496 break; 2497 case XPT_SET_TRAN_SETTINGS: 2498 { 2499 struct ccb_trans_settings *cts = &ccb->cts; 2500 struct ahci_device *d; 2501 2502 if (ahci_check_ids(ch, ccb)) 2503 return; 2504 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 2505 d = &ch->curr[ccb->ccb_h.target_id]; 2506 else 2507 d = &ch->user[ccb->ccb_h.target_id]; 2508 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) 2509 d->revision = cts->xport_specific.sata.revision; 2510 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) 2511 d->mode = cts->xport_specific.sata.mode; 2512 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) 2513 d->bytecount = min(8192, cts->xport_specific.sata.bytecount); 2514 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) 2515 d->tags = min(ch->numslots, cts->xport_specific.sata.tags); 2516 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) 2517 ch->pm_present = cts->xport_specific.sata.pm_present; 2518 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) 2519 d->atapi = cts->xport_specific.sata.atapi; 2520 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) 2521 d->caps = cts->xport_specific.sata.caps; 2522 ccb->ccb_h.status = CAM_REQ_CMP; 2523 break; 2524 } 2525 case XPT_GET_TRAN_SETTINGS: 2526 /* Get default/user set transfer settings for the target */ 2527 { 2528 struct ccb_trans_settings 
*cts = &ccb->cts; 2529 struct ahci_device *d; 2530 uint32_t status; 2531 2532 if (ahci_check_ids(ch, ccb)) 2533 return; 2534 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 2535 d = &ch->curr[ccb->ccb_h.target_id]; 2536 else 2537 d = &ch->user[ccb->ccb_h.target_id]; 2538 cts->protocol = PROTO_UNSPECIFIED; 2539 cts->protocol_version = PROTO_VERSION_UNSPECIFIED; 2540 cts->transport = XPORT_SATA; 2541 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 2542 cts->proto_specific.valid = 0; 2543 cts->xport_specific.sata.valid = 0; 2544 if (cts->type == CTS_TYPE_CURRENT_SETTINGS && 2545 (ccb->ccb_h.target_id == 15 || 2546 (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { 2547 status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; 2548 if (status & 0x0f0) { 2549 cts->xport_specific.sata.revision = 2550 (status & 0x0f0) >> 4; 2551 cts->xport_specific.sata.valid |= 2552 CTS_SATA_VALID_REVISION; 2553 } 2554 cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; 2555 if (ch->pm_level) { 2556 if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) 2557 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; 2558 if (ch->caps2 & AHCI_CAP2_APST) 2559 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; 2560 } 2561 if ((ch->caps & AHCI_CAP_SNCQ) && 2562 (ch->quirks & AHCI_Q_NOAA) == 0) 2563 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; 2564 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; 2565 cts->xport_specific.sata.caps &= 2566 ch->user[ccb->ccb_h.target_id].caps; 2567 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 2568 } else { 2569 cts->xport_specific.sata.revision = d->revision; 2570 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; 2571 cts->xport_specific.sata.caps = d->caps; 2572 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 2573 } 2574 cts->xport_specific.sata.mode = d->mode; 2575 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; 2576 cts->xport_specific.sata.bytecount = d->bytecount; 2577 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; 2578 cts->xport_specific.sata.pm_present = ch->pm_present; 2579 cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; 2580 cts->xport_specific.sata.tags = d->tags; 2581 cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; 2582 cts->xport_specific.sata.atapi = d->atapi; 2583 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; 2584 ccb->ccb_h.status = CAM_REQ_CMP; 2585 break; 2586 } 2587 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 2588 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 2589 ahci_reset(ch); 2590 ccb->ccb_h.status = CAM_REQ_CMP; 2591 break; 2592 case XPT_TERM_IO: /* Terminate the I/O process */ 2593 /* XXX Implement */ 2594 ccb->ccb_h.status = CAM_REQ_INVALID; 2595 break; 2596 case XPT_PATH_INQ: /* Path routing inquiry */ 2597 { 2598 struct ccb_pathinq *cpi = &ccb->cpi; 2599 2600 cpi->version_num = 1; /* XXX??? 
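 * CAM does not define a specific value here; other FreeBSD SIMs
 * report 1 as well.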
*/ 2601 cpi->hba_inquiry = PI_SDTR_ABLE; 2602 if (ch->caps & AHCI_CAP_SNCQ) 2603 cpi->hba_inquiry |= PI_TAG_ABLE; 2604 if (ch->caps & AHCI_CAP_SPM) 2605 cpi->hba_inquiry |= PI_SATAPM; 2606 cpi->target_sprt = 0; 2607 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; 2608 cpi->hba_eng_cnt = 0; 2609 if (ch->caps & AHCI_CAP_SPM) 2610 cpi->max_target = 15; 2611 else 2612 cpi->max_target = 0; 2613 cpi->max_lun = 0; 2614 cpi->initiator_id = 0; 2615 cpi->bus_id = cam_sim_bus(sim); 2616 cpi->base_transfer_speed = 150000; 2617 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2618 strncpy(cpi->hba_vid, "AHCI", HBA_IDLEN); 2619 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2620 cpi->unit_number = cam_sim_unit(sim); 2621 cpi->transport = XPORT_SATA; 2622 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 2623 cpi->protocol = PROTO_ATA; 2624 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 2625 cpi->maxio = MAXPHYS; 2626 /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ 2627 if (ch->quirks & AHCI_Q_MAXIO_64K) 2628 cpi->maxio = min(cpi->maxio, 128 * 512); 2629 cpi->hba_vendor = ch->vendorid; 2630 cpi->hba_device = ch->deviceid; 2631 cpi->hba_subvendor = ch->subvendorid; 2632 cpi->hba_subdevice = ch->subdeviceid; 2633 cpi->ccb_h.status = CAM_REQ_CMP; 2634 break; 2635 } 2636 default: 2637 ccb->ccb_h.status = CAM_REQ_INVALID; 2638 break; 2639 } 2640 ahci_done(ch, ccb); 2641 } 2642 2643 static void 2644 ahcipoll(struct cam_sim *sim) 2645 { 2646 struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); 2647 uint32_t istatus; 2648 2649 /* Read interrupt statuses and process if any. */ 2650 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 2651 if (istatus != 0) 2652 ahci_ch_intr_main(ch, istatus); 2653 if (ch->resetting != 0 && 2654 (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { 2655 ch->resetpolldiv = 1000; 2656 ahci_reset_to(ch); 2657 } 2658 } 2659 MODULE_VERSION(ahci, 1); 2660 MODULE_DEPEND(ahci, cam, 1, 1, 1); 2661