1 /*- 2 * Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org> 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer, 10 * without modification, immediately at the beginning of the file. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 25 */ 26 27 #include <sys/cdefs.h> 28 __FBSDID("$FreeBSD$"); 29 30 #include <sys/param.h> 31 #include <sys/module.h> 32 #include <sys/systm.h> 33 #include <sys/kernel.h> 34 #include <sys/bus.h> 35 #include <sys/conf.h> 36 #include <sys/endian.h> 37 #include <sys/malloc.h> 38 #include <sys/lock.h> 39 #include <sys/mutex.h> 40 #include <machine/stdarg.h> 41 #include <machine/resource.h> 42 #include <machine/bus.h> 43 #include <sys/rman.h> 44 #include "ahci.h" 45 46 #include <cam/cam.h> 47 #include <cam/cam_ccb.h> 48 #include <cam/cam_sim.h> 49 #include <cam/cam_xpt_sim.h> 50 #include <cam/cam_debug.h> 51 52 /* local prototypes */ 53 static void ahci_intr(void *data); 54 static void ahci_intr_one(void *data); 55 static void ahci_intr_one_edge(void *data); 56 static int ahci_ch_init(device_t dev); 57 static int ahci_ch_deinit(device_t dev); 58 static int ahci_ch_suspend(device_t dev); 59 static int ahci_ch_resume(device_t dev); 60 static void ahci_ch_pm(void *arg); 61 static void ahci_ch_intr(void *arg); 62 static void ahci_ch_intr_direct(void *arg); 63 static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); 64 static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb); 65 static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 66 static void ahci_execute_transaction(struct ahci_slot *slot); 67 static void ahci_timeout(struct ahci_slot *slot); 68 static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); 69 static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); 70 static void ahci_dmainit(device_t dev); 71 static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); 72 static void ahci_dmafini(device_t dev); 73 static void ahci_slotsalloc(device_t dev); 74 static void ahci_slotsfree(device_t dev); 75 static void ahci_reset(struct ahci_channel *ch); 76 static void ahci_start(struct ahci_channel *ch, int fbs); 77 static void ahci_stop(struct ahci_channel *ch); 78 static void ahci_clo(struct ahci_channel *ch); 79 static void 
ahci_start_fr(struct ahci_channel *ch); 80 static void ahci_stop_fr(struct ahci_channel *ch); 81 82 static int ahci_sata_connect(struct ahci_channel *ch); 83 static int ahci_sata_phy_reset(struct ahci_channel *ch); 84 static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0); 85 86 static void ahci_issue_recovery(struct ahci_channel *ch); 87 static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb); 88 static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb); 89 90 static void ahciaction(struct cam_sim *sim, union ccb *ccb); 91 static void ahcipoll(struct cam_sim *sim); 92 93 static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers"); 94 95 #define recovery_type spriv_field0 96 #define RECOVERY_NONE 0 97 #define RECOVERY_READ_LOG 1 98 #define RECOVERY_REQUEST_SENSE 2 99 #define recovery_slot spriv_field1 100 101 int 102 ahci_ctlr_setup(device_t dev) 103 { 104 struct ahci_controller *ctlr = device_get_softc(dev); 105 /* Clear interrupts */ 106 ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS)); 107 /* Configure CCC */ 108 if (ctlr->ccc) { 109 ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI)); 110 ATA_OUTL(ctlr->r_mem, AHCI_CCCC, 111 (ctlr->ccc << AHCI_CCCC_TV_SHIFT) | 112 (4 << AHCI_CCCC_CC_SHIFT) | 113 AHCI_CCCC_EN); 114 ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) & 115 AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT; 116 if (bootverbose) { 117 device_printf(dev, 118 "CCC with %dms/4cmd enabled on vector %d\n", 119 ctlr->ccc, ctlr->cccv); 120 } 121 } 122 /* Enable AHCI interrupts */ 123 ATA_OUTL(ctlr->r_mem, AHCI_GHC, 124 ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE); 125 return (0); 126 } 127 128 int 129 ahci_ctlr_reset(device_t dev) 130 { 131 struct ahci_controller *ctlr = device_get_softc(dev); 132 int timeout; 133 134 /* Enable AHCI mode */ 135 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); 136 /* Reset AHCI controller */ 137 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR); 138 for (timeout = 1000; timeout > 0; timeout--) { 139 DELAY(1000); 140 if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0) 141 break; 142 } 143 if (timeout == 0) { 144 device_printf(dev, "AHCI controller reset failure\n"); 145 return (ENXIO); 146 } 147 /* Reenable AHCI mode */ 148 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); 149 return (0); 150 } 151 152 153 int 154 ahci_attach(device_t dev) 155 { 156 struct ahci_controller *ctlr = device_get_softc(dev); 157 int error, i, u, speed, unit; 158 u_int32_t version; 159 device_t child; 160 161 ctlr->dev = dev; 162 ctlr->ccc = 0; 163 resource_int_value(device_get_name(dev), 164 device_get_unit(dev), "ccc", &ctlr->ccc); 165 166 /* Setup our own memory management for channels. 
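 * Each channel later reserves its own 128-byte window of port registers
 * from this rman in ahci_alloc_resource() instead of mapping the BAR again.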
*/ 167 ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); 168 ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); 169 ctlr->sc_iomem.rm_type = RMAN_ARRAY; 170 ctlr->sc_iomem.rm_descr = "I/O memory addresses"; 171 if ((error = rman_init(&ctlr->sc_iomem)) != 0) { 172 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); 173 return (error); 174 } 175 if ((error = rman_manage_region(&ctlr->sc_iomem, 176 rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { 177 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); 178 rman_fini(&ctlr->sc_iomem); 179 return (error); 180 } 181 /* Get the HW capabilities */ 182 version = ATA_INL(ctlr->r_mem, AHCI_VS); 183 ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); 184 if (version >= 0x00010200) 185 ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); 186 if (ctlr->caps & AHCI_CAP_EMS) 187 ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); 188 ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); 189 190 /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ 191 if ((ctlr->quirks & AHCI_Q_ALTSIG) && 192 (ctlr->caps & AHCI_CAP_SPM) == 0) 193 ctlr->quirks |= AHCI_Q_NOBSYRES; 194 195 if (ctlr->quirks & AHCI_Q_1CH) { 196 ctlr->caps &= ~AHCI_CAP_NPMASK; 197 ctlr->ichannels &= 0x01; 198 } 199 if (ctlr->quirks & AHCI_Q_2CH) { 200 ctlr->caps &= ~AHCI_CAP_NPMASK; 201 ctlr->caps |= 1; 202 ctlr->ichannels &= 0x03; 203 } 204 if (ctlr->quirks & AHCI_Q_4CH) { 205 ctlr->caps &= ~AHCI_CAP_NPMASK; 206 ctlr->caps |= 3; 207 ctlr->ichannels &= 0x0f; 208 } 209 ctlr->channels = MAX(flsl(ctlr->ichannels), 210 (ctlr->caps & AHCI_CAP_NPMASK) + 1); 211 if (ctlr->quirks & AHCI_Q_NOPMP) 212 ctlr->caps &= ~AHCI_CAP_SPM; 213 if (ctlr->quirks & AHCI_Q_NONCQ) 214 ctlr->caps &= ~AHCI_CAP_SNCQ; 215 if ((ctlr->caps & AHCI_CAP_CCCS) == 0) 216 ctlr->ccc = 0; 217 ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); 218 219 /* Create controller-wide DMA tag. */ 220 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 221 (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR : 222 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 223 BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 224 0, NULL, NULL, &ctlr->dma_tag)) { 225 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, 226 ctlr->r_mem); 227 rman_fini(&ctlr->sc_iomem); 228 return (ENXIO); 229 } 230 231 ahci_ctlr_setup(dev); 232 233 /* Setup interrupts. */ 234 if ((error = ahci_setup_interrupt(dev)) != 0) { 235 bus_dma_tag_destroy(ctlr->dma_tag); 236 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, 237 ctlr->r_mem); 238 rman_fini(&ctlr->sc_iomem); 239 return (error); 240 } 241 242 i = 0; 243 for (u = ctlr->ichannels; u != 0; u >>= 1) 244 i += (u & 1); 245 ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); 246 resource_int_value(device_get_name(dev), device_get_unit(dev), 247 "direct", &ctlr->direct); 248 /* Announce HW capabilities. */ 249 speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; 250 device_printf(dev, 251 "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", 252 ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), 253 ((version >> 4) & 0xf0) + (version & 0x0f), 254 (ctlr->caps & AHCI_CAP_NPMASK) + 1, 255 ((speed == 1) ? "1.5":((speed == 2) ? "3": 256 ((speed == 3) ? "6":"?"))), 257 (ctlr->caps & AHCI_CAP_SPM) ? 258 "supported" : "not supported", 259 (ctlr->caps & AHCI_CAP_FBSS) ? 
260 " with FBS" : ""); 261 if (ctlr->quirks != 0) { 262 device_printf(dev, "quirks=0x%b\n", ctlr->quirks, 263 AHCI_Q_BIT_STRING); 264 } 265 if (bootverbose) { 266 device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", 267 (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", 268 (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", 269 (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", 270 (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", 271 (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", 272 (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", 273 (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", 274 (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", 275 ((speed == 1) ? "1.5":((speed == 2) ? "3": 276 ((speed == 3) ? "6":"?")))); 277 printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", 278 (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", 279 (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", 280 (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", 281 (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", 282 (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", 283 (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", 284 ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, 285 (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", 286 (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", 287 (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"", 288 (ctlr->caps & AHCI_CAP_NPMASK) + 1); 289 } 290 if (bootverbose && version >= 0x00010200) { 291 device_printf(dev, "Caps2:%s%s%s%s%s%s\n", 292 (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"", 293 (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"", 294 (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"", 295 (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", 296 (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", 297 (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":""); 298 } 299 /* Attach all channels on this controller */ 300 for (unit = 0; unit < ctlr->channels; unit++) { 301 child = device_add_child(dev, "ahcich", -1); 302 if (child == NULL) { 303 device_printf(dev, "failed to add channel device\n"); 304 continue; 305 } 306 device_set_ivars(child, (void *)(intptr_t)unit); 307 if ((ctlr->ichannels & (1 << unit)) == 0) 308 device_disable(child); 309 } 310 if (ctlr->caps & AHCI_CAP_EMS) { 311 child = device_add_child(dev, "ahciem", -1); 312 if (child == NULL) 313 device_printf(dev, "failed to add enclosure device\n"); 314 else 315 device_set_ivars(child, (void *)(intptr_t)-1); 316 } 317 bus_generic_attach(dev); 318 return (0); 319 } 320 321 int 322 ahci_detach(device_t dev) 323 { 324 struct ahci_controller *ctlr = device_get_softc(dev); 325 int i; 326 327 /* Detach & delete all children */ 328 device_delete_children(dev); 329 330 /* Free interrupts. */ 331 for (i = 0; i < ctlr->numirqs; i++) { 332 if (ctlr->irqs[i].r_irq) { 333 bus_teardown_intr(dev, ctlr->irqs[i].r_irq, 334 ctlr->irqs[i].handle); 335 bus_release_resource(dev, SYS_RES_IRQ, 336 ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); 337 } 338 } 339 bus_dma_tag_destroy(ctlr->dma_tag); 340 /* Free memory. */ 341 rman_fini(&ctlr->sc_iomem); 342 if (ctlr->r_mem) 343 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); 344 return (0); 345 } 346 347 int 348 ahci_setup_interrupt(device_t dev) 349 { 350 struct ahci_controller *ctlr = device_get_softc(dev); 351 int i; 352 353 /* Check for single MSI vector fallback. */ 354 if (ctlr->numirqs > 1 && 355 (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { 356 device_printf(dev, "Falling back to one MSI\n"); 357 ctlr->numirqs = 1; 358 } 359 360 /* Ensure we don't overrun irqs. 
*/ 361 if (ctlr->numirqs > AHCI_MAX_IRQS) { 362 device_printf(dev, "Too many irqs %d > %d (clamping)\n", 363 ctlr->numirqs, AHCI_MAX_IRQS); 364 ctlr->numirqs = AHCI_MAX_IRQS; 365 } 366 367 /* Allocate all IRQs. */ 368 for (i = 0; i < ctlr->numirqs; i++) { 369 ctlr->irqs[i].ctlr = ctlr; 370 ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0); 371 if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi) 372 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; 373 else if (ctlr->numirqs == 1 || i >= ctlr->channels || 374 (ctlr->ccc && i == ctlr->cccv)) 375 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; 376 else if (i == ctlr->numirqs - 1) 377 ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; 378 else 379 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; 380 if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 381 &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { 382 device_printf(dev, "unable to map interrupt\n"); 383 return (ENXIO); 384 } 385 if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, 386 (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : 387 ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge : 388 ahci_intr_one), 389 &ctlr->irqs[i], &ctlr->irqs[i].handle))) { 390 /* SOS XXX release r_irq */ 391 device_printf(dev, "unable to setup interrupt\n"); 392 return (ENXIO); 393 } 394 if (ctlr->numirqs > 1) { 395 bus_describe_intr(dev, ctlr->irqs[i].r_irq, 396 ctlr->irqs[i].handle, 397 ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? 398 "ch%d" : "%d", i); 399 } 400 } 401 return (0); 402 } 403 404 /* 405 * Common case interrupt handler. 406 */ 407 static void 408 ahci_intr(void *data) 409 { 410 struct ahci_controller_irq *irq = data; 411 struct ahci_controller *ctlr = irq->ctlr; 412 u_int32_t is, ise = 0; 413 void *arg; 414 int unit; 415 416 if (irq->mode == AHCI_IRQ_MODE_ALL) { 417 unit = 0; 418 if (ctlr->ccc) 419 is = ctlr->ichannels; 420 else 421 is = ATA_INL(ctlr->r_mem, AHCI_IS); 422 } else { /* AHCI_IRQ_MODE_AFTER */ 423 unit = irq->r_irq_rid - 1; 424 is = ATA_INL(ctlr->r_mem, AHCI_IS); 425 } 426 /* CCC interrupt is edge triggered. */ 427 if (ctlr->ccc) 428 ise = 1 << ctlr->cccv; 429 /* Some controllers have edge triggered IS. */ 430 if (ctlr->quirks & AHCI_Q_EDGEIS) 431 ise |= is; 432 if (ise != 0) 433 ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); 434 for (; unit < ctlr->channels; unit++) { 435 if ((is & (1 << unit)) != 0 && 436 (arg = ctlr->interrupt[unit].argument)) { 437 ctlr->interrupt[unit].function(arg); 438 } 439 } 440 /* AHCI declares level triggered IS. */ 441 if (!(ctlr->quirks & AHCI_Q_EDGEIS)) 442 ATA_OUTL(ctlr->r_mem, AHCI_IS, is); 443 } 444 445 /* 446 * Simplified interrupt handler for multivector MSI mode. 447 */ 448 static void 449 ahci_intr_one(void *data) 450 { 451 struct ahci_controller_irq *irq = data; 452 struct ahci_controller *ctlr = irq->ctlr; 453 void *arg; 454 int unit; 455 456 unit = irq->r_irq_rid - 1; 457 if ((arg = ctlr->interrupt[unit].argument)) 458 ctlr->interrupt[unit].function(arg); 459 /* AHCI declares level triggered IS. */ 460 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); 461 } 462 463 static void 464 ahci_intr_one_edge(void *data) 465 { 466 struct ahci_controller_irq *irq = data; 467 struct ahci_controller *ctlr = irq->ctlr; 468 void *arg; 469 int unit; 470 471 unit = irq->r_irq_rid - 1; 472 /* Some controllers have edge triggered IS. 
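 * Ack the port bit before dispatching to the channel handler, so an
 * interrupt arriving while this one is being processed is not lost.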
*/ 473 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); 474 if ((arg = ctlr->interrupt[unit].argument)) 475 ctlr->interrupt[unit].function(arg); 476 } 477 478 struct resource * 479 ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, 480 u_long start, u_long end, u_long count, u_int flags) 481 { 482 struct ahci_controller *ctlr = device_get_softc(dev); 483 struct resource *res; 484 long st; 485 int offset, size, unit; 486 487 unit = (intptr_t)device_get_ivars(child); 488 res = NULL; 489 switch (type) { 490 case SYS_RES_MEMORY: 491 if (unit >= 0) { 492 offset = AHCI_OFFSET + (unit << 7); 493 size = 128; 494 } else if (*rid == 0) { 495 offset = AHCI_EM_CTL; 496 size = 4; 497 } else { 498 offset = (ctlr->emloc & 0xffff0000) >> 14; 499 size = (ctlr->emloc & 0x0000ffff) << 2; 500 if (*rid != 1) { 501 if (*rid == 2 && (ctlr->capsem & 502 (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) 503 offset += size; 504 else 505 break; 506 } 507 } 508 st = rman_get_start(ctlr->r_mem); 509 res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, 510 st + offset + size - 1, size, RF_ACTIVE, child); 511 if (res) { 512 bus_space_handle_t bsh; 513 bus_space_tag_t bst; 514 bsh = rman_get_bushandle(ctlr->r_mem); 515 bst = rman_get_bustag(ctlr->r_mem); 516 bus_space_subregion(bst, bsh, offset, 128, &bsh); 517 rman_set_bushandle(res, bsh); 518 rman_set_bustag(res, bst); 519 } 520 break; 521 case SYS_RES_IRQ: 522 if (*rid == ATA_IRQ_RID) 523 res = ctlr->irqs[0].r_irq; 524 break; 525 } 526 return (res); 527 } 528 529 int 530 ahci_release_resource(device_t dev, device_t child, int type, int rid, 531 struct resource *r) 532 { 533 534 switch (type) { 535 case SYS_RES_MEMORY: 536 rman_release_resource(r); 537 return (0); 538 case SYS_RES_IRQ: 539 if (rid != ATA_IRQ_RID) 540 return (ENOENT); 541 return (0); 542 } 543 return (EINVAL); 544 } 545 546 int 547 ahci_setup_intr(device_t dev, device_t child, struct resource *irq, 548 int flags, driver_filter_t *filter, driver_intr_t *function, 549 void *argument, void **cookiep) 550 { 551 struct ahci_controller *ctlr = device_get_softc(dev); 552 int unit = (intptr_t)device_get_ivars(child); 553 554 if (filter != NULL) { 555 printf("ahci.c: we cannot use a filter here\n"); 556 return (EINVAL); 557 } 558 ctlr->interrupt[unit].function = function; 559 ctlr->interrupt[unit].argument = argument; 560 return (0); 561 } 562 563 int 564 ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, 565 void *cookie) 566 { 567 struct ahci_controller *ctlr = device_get_softc(dev); 568 int unit = (intptr_t)device_get_ivars(child); 569 570 ctlr->interrupt[unit].function = NULL; 571 ctlr->interrupt[unit].argument = NULL; 572 return (0); 573 } 574 575 int 576 ahci_print_child(device_t dev, device_t child) 577 { 578 int retval, channel; 579 580 retval = bus_print_child_header(dev, child); 581 channel = (int)(intptr_t)device_get_ivars(child); 582 if (channel >= 0) 583 retval += printf(" at channel %d", channel); 584 retval += bus_print_child_footer(dev, child); 585 return (retval); 586 } 587 588 int 589 ahci_child_location_str(device_t dev, device_t child, char *buf, 590 size_t buflen) 591 { 592 int channel; 593 594 channel = (int)(intptr_t)device_get_ivars(child); 595 if (channel >= 0) 596 snprintf(buf, buflen, "channel=%d", channel); 597 return (0); 598 } 599 600 bus_dma_tag_t 601 ahci_get_dma_tag(device_t dev, device_t child) 602 { 603 struct ahci_controller *ctlr = device_get_softc(dev); 604 605 return (ctlr->dma_tag); 606 } 607 608 static int 609 ahci_ch_probe(device_t dev) 610 { 611 
612 device_set_desc_copy(dev, "AHCI channel"); 613 return (0); 614 } 615 616 static int 617 ahci_ch_attach(device_t dev) 618 { 619 struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); 620 struct ahci_channel *ch = device_get_softc(dev); 621 struct cam_devq *devq; 622 int rid, error, i, sata_rev = 0; 623 u_int32_t version; 624 625 ch->dev = dev; 626 ch->unit = (intptr_t)device_get_ivars(dev); 627 ch->caps = ctlr->caps; 628 ch->caps2 = ctlr->caps2; 629 ch->quirks = ctlr->quirks; 630 ch->vendorid = ctlr->vendorid; 631 ch->deviceid = ctlr->deviceid; 632 ch->subvendorid = ctlr->subvendorid; 633 ch->subdeviceid = ctlr->subdeviceid; 634 ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; 635 mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); 636 ch->pm_level = 0; 637 resource_int_value(device_get_name(dev), 638 device_get_unit(dev), "pm_level", &ch->pm_level); 639 STAILQ_INIT(&ch->doneq); 640 if (ch->pm_level > 3) 641 callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); 642 callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); 643 /* JMicron external ports (0) sometimes limited */ 644 if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0) 645 sata_rev = 1; 646 if (ch->quirks & AHCI_Q_SATA2) 647 sata_rev = 2; 648 resource_int_value(device_get_name(dev), 649 device_get_unit(dev), "sata_rev", &sata_rev); 650 for (i = 0; i < 16; i++) { 651 ch->user[i].revision = sata_rev; 652 ch->user[i].mode = 0; 653 ch->user[i].bytecount = 8192; 654 ch->user[i].tags = ch->numslots; 655 ch->user[i].caps = 0; 656 ch->curr[i] = ch->user[i]; 657 if (ch->pm_level) { 658 ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | 659 CTS_SATA_CAPS_H_APST | 660 CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; 661 } 662 ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | 663 CTS_SATA_CAPS_H_AN; 664 } 665 rid = 0; 666 if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 667 &rid, RF_ACTIVE))) 668 return (ENXIO); 669 ahci_dmainit(dev); 670 ahci_slotsalloc(dev); 671 ahci_ch_init(dev); 672 mtx_lock(&ch->mtx); 673 rid = ATA_IRQ_RID; 674 if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 675 &rid, RF_SHAREABLE | RF_ACTIVE))) { 676 device_printf(dev, "Unable to map interrupt\n"); 677 error = ENXIO; 678 goto err0; 679 } 680 if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, 681 ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr, 682 ch, &ch->ih))) { 683 device_printf(dev, "Unable to setup interrupt\n"); 684 error = ENXIO; 685 goto err1; 686 } 687 ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); 688 version = ATA_INL(ctlr->r_mem, AHCI_VS); 689 if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) 690 ch->chcaps |= AHCI_P_CMD_FBSCP; 691 if (ch->caps2 & AHCI_CAP2_SDS) 692 ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP); 693 if (bootverbose) { 694 device_printf(dev, "Caps:%s%s%s%s%s%s\n", 695 (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"", 696 (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"", 697 (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", 698 (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", 699 (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"", 700 (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":""); 701 } 702 /* Create the device queue for our SIM. */ 703 devq = cam_simq_alloc(ch->numslots); 704 if (devq == NULL) { 705 device_printf(dev, "Unable to allocate simq\n"); 706 error = ENOMEM; 707 goto err1; 708 } 709 /* Construct SIM entry */ 710 ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, 711 device_get_unit(dev), (struct mtx *)&ch->mtx, 712 min(2, ch->numslots), 713 (ch->caps & AHCI_CAP_SNCQ) ? 
ch->numslots : 0, 714 devq); 715 if (ch->sim == NULL) { 716 cam_simq_free(devq); 717 device_printf(dev, "unable to allocate sim\n"); 718 error = ENOMEM; 719 goto err1; 720 } 721 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { 722 device_printf(dev, "unable to register xpt bus\n"); 723 error = ENXIO; 724 goto err2; 725 } 726 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), 727 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 728 device_printf(dev, "unable to create path\n"); 729 error = ENXIO; 730 goto err3; 731 } 732 if (ch->pm_level > 3) { 733 callout_reset(&ch->pm_timer, 734 (ch->pm_level == 4) ? hz / 1000 : hz / 8, 735 ahci_ch_pm, ch); 736 } 737 mtx_unlock(&ch->mtx); 738 return (0); 739 740 err3: 741 xpt_bus_deregister(cam_sim_path(ch->sim)); 742 err2: 743 cam_sim_free(ch->sim, /*free_devq*/TRUE); 744 err1: 745 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 746 err0: 747 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); 748 mtx_unlock(&ch->mtx); 749 mtx_destroy(&ch->mtx); 750 return (error); 751 } 752 753 static int 754 ahci_ch_detach(device_t dev) 755 { 756 struct ahci_channel *ch = device_get_softc(dev); 757 758 mtx_lock(&ch->mtx); 759 xpt_async(AC_LOST_DEVICE, ch->path, NULL); 760 /* Forget about reset. */ 761 if (ch->resetting) { 762 ch->resetting = 0; 763 xpt_release_simq(ch->sim, TRUE); 764 } 765 xpt_free_path(ch->path); 766 xpt_bus_deregister(cam_sim_path(ch->sim)); 767 cam_sim_free(ch->sim, /*free_devq*/TRUE); 768 mtx_unlock(&ch->mtx); 769 770 if (ch->pm_level > 3) 771 callout_drain(&ch->pm_timer); 772 callout_drain(&ch->reset_timer); 773 bus_teardown_intr(dev, ch->r_irq, ch->ih); 774 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 775 776 ahci_ch_deinit(dev); 777 ahci_slotsfree(dev); 778 ahci_dmafini(dev); 779 780 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); 781 mtx_destroy(&ch->mtx); 782 return (0); 783 } 784 785 static int 786 ahci_ch_init(device_t dev) 787 { 788 struct ahci_channel *ch = device_get_softc(dev); 789 uint64_t work; 790 791 /* Disable port interrupts */ 792 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 793 /* Setup work areas */ 794 work = ch->dma.work_bus + AHCI_CL_OFFSET; 795 ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); 796 ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); 797 work = ch->dma.rfis_bus; 798 ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); 799 ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); 800 /* Activate the channel and power/spin up device */ 801 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 802 (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | 803 ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | 804 ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); 805 ahci_start_fr(ch); 806 ahci_start(ch, 1); 807 return (0); 808 } 809 810 static int 811 ahci_ch_deinit(device_t dev) 812 { 813 struct ahci_channel *ch = device_get_softc(dev); 814 815 /* Disable port interrupts. */ 816 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 817 /* Reset command register. */ 818 ahci_stop(ch); 819 ahci_stop_fr(ch); 820 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); 821 /* Allow everything, including partial and slumber modes. */ 822 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); 823 /* Request slumber mode transition and give some time to get there. */ 824 ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); 825 DELAY(100); 826 /* Disable PHY. 
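 * ATA_SC_DET_DISABLE takes the PHY offline; it is brought back by the
 * channel reset performed on resume or re-attach.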
*/ 827 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); 828 return (0); 829 } 830 831 static int 832 ahci_ch_suspend(device_t dev) 833 { 834 struct ahci_channel *ch = device_get_softc(dev); 835 836 mtx_lock(&ch->mtx); 837 xpt_freeze_simq(ch->sim, 1); 838 /* Forget about reset. */ 839 if (ch->resetting) { 840 ch->resetting = 0; 841 callout_stop(&ch->reset_timer); 842 xpt_release_simq(ch->sim, TRUE); 843 } 844 while (ch->oslots) 845 msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); 846 ahci_ch_deinit(dev); 847 mtx_unlock(&ch->mtx); 848 return (0); 849 } 850 851 static int 852 ahci_ch_resume(device_t dev) 853 { 854 struct ahci_channel *ch = device_get_softc(dev); 855 856 mtx_lock(&ch->mtx); 857 ahci_ch_init(dev); 858 ahci_reset(ch); 859 xpt_release_simq(ch->sim, TRUE); 860 mtx_unlock(&ch->mtx); 861 return (0); 862 } 863 864 devclass_t ahcich_devclass; 865 static device_method_t ahcich_methods[] = { 866 DEVMETHOD(device_probe, ahci_ch_probe), 867 DEVMETHOD(device_attach, ahci_ch_attach), 868 DEVMETHOD(device_detach, ahci_ch_detach), 869 DEVMETHOD(device_suspend, ahci_ch_suspend), 870 DEVMETHOD(device_resume, ahci_ch_resume), 871 DEVMETHOD_END 872 }; 873 static driver_t ahcich_driver = { 874 "ahcich", 875 ahcich_methods, 876 sizeof(struct ahci_channel) 877 }; 878 DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL); 879 880 struct ahci_dc_cb_args { 881 bus_addr_t maddr; 882 int error; 883 }; 884 885 static void 886 ahci_dmainit(device_t dev) 887 { 888 struct ahci_channel *ch = device_get_softc(dev); 889 struct ahci_dc_cb_args dcba; 890 size_t rfsize; 891 892 /* Command area. */ 893 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, 894 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 895 NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 896 0, NULL, NULL, &ch->dma.work_tag)) 897 goto error; 898 if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 899 BUS_DMA_ZERO, &ch->dma.work_map)) 900 goto error; 901 if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, 902 AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { 903 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); 904 goto error; 905 } 906 ch->dma.work_bus = dcba.maddr; 907 /* FIS receive area. */ 908 if (ch->chcaps & AHCI_P_CMD_FBSCP) 909 rfsize = 4096; 910 else 911 rfsize = 256; 912 if (bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, 913 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 914 NULL, NULL, rfsize, 1, rfsize, 915 0, NULL, NULL, &ch->dma.rfis_tag)) 916 goto error; 917 if (bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, 918 &ch->dma.rfis_map)) 919 goto error; 920 if (bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, 921 rfsize, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { 922 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); 923 goto error; 924 } 925 ch->dma.rfis_bus = dcba.maddr; 926 /* Data area. 
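 * One DMA map is created per command slot; each transfer may use up to
 * AHCI_SG_ENTRIES PRD entries of at most AHCI_PRD_MAX bytes each.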
*/ 927 if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, 928 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 929 NULL, NULL, 930 AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots, 931 AHCI_SG_ENTRIES, AHCI_PRD_MAX, 932 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { 933 goto error; 934 } 935 return; 936 937 error: 938 device_printf(dev, "WARNING - DMA initialization failed\n"); 939 ahci_dmafini(dev); 940 } 941 942 static void 943 ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 944 { 945 struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; 946 947 if (!(dcba->error = error)) 948 dcba->maddr = segs[0].ds_addr; 949 } 950 951 static void 952 ahci_dmafini(device_t dev) 953 { 954 struct ahci_channel *ch = device_get_softc(dev); 955 956 if (ch->dma.data_tag) { 957 bus_dma_tag_destroy(ch->dma.data_tag); 958 ch->dma.data_tag = NULL; 959 } 960 if (ch->dma.rfis_bus) { 961 bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); 962 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); 963 ch->dma.rfis_bus = 0; 964 ch->dma.rfis = NULL; 965 } 966 if (ch->dma.work_bus) { 967 bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); 968 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); 969 ch->dma.work_bus = 0; 970 ch->dma.work = NULL; 971 } 972 if (ch->dma.work_tag) { 973 bus_dma_tag_destroy(ch->dma.work_tag); 974 ch->dma.work_tag = NULL; 975 } 976 } 977 978 static void 979 ahci_slotsalloc(device_t dev) 980 { 981 struct ahci_channel *ch = device_get_softc(dev); 982 int i; 983 984 /* Alloc and setup command/dma slots */ 985 bzero(ch->slot, sizeof(ch->slot)); 986 for (i = 0; i < ch->numslots; i++) { 987 struct ahci_slot *slot = &ch->slot[i]; 988 989 slot->ch = ch; 990 slot->slot = i; 991 slot->state = AHCI_SLOT_EMPTY; 992 slot->ccb = NULL; 993 callout_init_mtx(&slot->timeout, &ch->mtx, 0); 994 995 if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) 996 device_printf(ch->dev, "FAILURE - create data_map\n"); 997 } 998 } 999 1000 static void 1001 ahci_slotsfree(device_t dev) 1002 { 1003 struct ahci_channel *ch = device_get_softc(dev); 1004 int i; 1005 1006 /* Free all dma slots */ 1007 for (i = 0; i < ch->numslots; i++) { 1008 struct ahci_slot *slot = &ch->slot[i]; 1009 1010 callout_drain(&slot->timeout); 1011 if (slot->dma.data_map) { 1012 bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); 1013 slot->dma.data_map = NULL; 1014 } 1015 } 1016 } 1017 1018 static int 1019 ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr) 1020 { 1021 1022 if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || 1023 ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { 1024 u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); 1025 union ccb *ccb; 1026 1027 if (bootverbose) { 1028 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) 1029 device_printf(ch->dev, "CONNECT requested\n"); 1030 else 1031 device_printf(ch->dev, "DISCONNECT requested\n"); 1032 } 1033 ahci_reset(ch); 1034 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 1035 return (0); 1036 if (xpt_create_path(&ccb->ccb_h.path, NULL, 1037 cam_sim_path(ch->sim), 1038 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1039 xpt_free_ccb(ccb); 1040 return (0); 1041 } 1042 xpt_rescan(ccb); 1043 return (1); 1044 } 1045 return (0); 1046 } 1047 1048 static void 1049 ahci_cpd_check_events(struct ahci_channel *ch) 1050 { 1051 u_int32_t status; 1052 union ccb *ccb; 1053 device_t dev; 1054 1055 if (ch->pm_level == 0) 1056 return; 1057 1058 status = ATA_INL(ch->r_mem, 
AHCI_P_CMD); 1059 if ((status & AHCI_P_CMD_CPD) == 0) 1060 return; 1061 1062 if (bootverbose) { 1063 dev = ch->dev; 1064 if (status & AHCI_P_CMD_CPS) { 1065 device_printf(dev, "COLD CONNECT requested\n"); 1066 } else 1067 device_printf(dev, "COLD DISCONNECT requested\n"); 1068 } 1069 ahci_reset(ch); 1070 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 1071 return; 1072 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), 1073 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1074 xpt_free_ccb(ccb); 1075 return; 1076 } 1077 xpt_rescan(ccb); 1078 } 1079 1080 static void 1081 ahci_notify_events(struct ahci_channel *ch, u_int32_t status) 1082 { 1083 struct cam_path *dpath; 1084 int i; 1085 1086 if (ch->caps & AHCI_CAP_SSNTF) 1087 ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); 1088 if (bootverbose) 1089 device_printf(ch->dev, "SNTF 0x%04x\n", status); 1090 for (i = 0; i < 16; i++) { 1091 if ((status & (1 << i)) == 0) 1092 continue; 1093 if (xpt_create_path(&dpath, NULL, 1094 xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { 1095 xpt_async(AC_SCSI_AEN, dpath, NULL); 1096 xpt_free_path(dpath); 1097 } 1098 } 1099 } 1100 1101 static void 1102 ahci_done(struct ahci_channel *ch, union ccb *ccb) 1103 { 1104 1105 mtx_assert(&ch->mtx, MA_OWNED); 1106 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || 1107 ch->batch == 0) { 1108 xpt_done(ccb); 1109 return; 1110 } 1111 1112 STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); 1113 } 1114 1115 static void 1116 ahci_ch_intr(void *arg) 1117 { 1118 struct ahci_channel *ch = (struct ahci_channel *)arg; 1119 uint32_t istatus; 1120 1121 /* Read interrupt statuses. */ 1122 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 1123 if (istatus == 0) 1124 return; 1125 1126 mtx_lock(&ch->mtx); 1127 ahci_ch_intr_main(ch, istatus); 1128 mtx_unlock(&ch->mtx); 1129 } 1130 1131 static void 1132 ahci_ch_intr_direct(void *arg) 1133 { 1134 struct ahci_channel *ch = (struct ahci_channel *)arg; 1135 struct ccb_hdr *ccb_h; 1136 uint32_t istatus; 1137 STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq); 1138 1139 /* Read interrupt statuses. */ 1140 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 1141 if (istatus == 0) 1142 return; 1143 1144 mtx_lock(&ch->mtx); 1145 ch->batch = 1; 1146 ahci_ch_intr_main(ch, istatus); 1147 ch->batch = 0; 1148 /* 1149 * Prevent the possibility of issues caused by processing the queue 1150 * while unlocked below by moving the contents to a local queue. 1151 */ 1152 STAILQ_CONCAT(&tmp_doneq, &ch->doneq); 1153 mtx_unlock(&ch->mtx); 1154 while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) { 1155 STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe); 1156 xpt_done_direct((union ccb *)ccb_h); 1157 } 1158 } 1159 1160 static void 1161 ahci_ch_pm(void *arg) 1162 { 1163 struct ahci_channel *ch = (struct ahci_channel *)arg; 1164 uint32_t work; 1165 1166 if (ch->numrslots != 0) 1167 return; 1168 work = ATA_INL(ch->r_mem, AHCI_P_CMD); 1169 if (ch->pm_level == 4) 1170 work |= AHCI_P_CMD_PARTIAL; 1171 else 1172 work |= AHCI_P_CMD_SLUMBER; 1173 ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); 1174 } 1175 1176 static void 1177 ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) 1178 { 1179 uint32_t cstatus, serr = 0, sntf = 0, ok, err; 1180 enum ahci_err_type et; 1181 int i, ccs, port, reset = 0; 1182 1183 /* Clear interrupt statuses. */ 1184 ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); 1185 /* Read command statuses. 
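 * PxSACT covers the outstanding NCQ tags and PxCI the remaining non-NCQ
 * slots; bits still set belong to commands that have not completed.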
 */
	if (ch->numtslots != 0)
		cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
	else
		cstatus = 0;
	if (ch->numrslots != ch->numtslots)
		cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI);
	/* Read SNTF in one of possible ways. */
	if ((istatus & AHCI_P_IX_SDB) &&
	    (ch->pm_present || ch->curr[0].atapi != 0)) {
		if (ch->caps & AHCI_CAP_SSNTF)
			sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF);
		else if (ch->fbs_enabled) {
			u_int8_t *fis = ch->dma.rfis + 0x58;

			for (i = 0; i < 16; i++) {
				if (fis[1] & 0x80) {
					fis[1] &= 0x7f;
					sntf |= 1 << i;
				}
				fis += 256;
			}
		} else {
			u_int8_t *fis = ch->dma.rfis + 0x58;

			if (fis[1] & 0x80)
				sntf = (1 << (fis[1] & 0x0f));
		}
	}
	/* Process PHY events */
	if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF |
	    AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
		serr = ATA_INL(ch->r_mem, AHCI_P_SERR);
		if (serr) {
			ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr);
			reset = ahci_phy_check_events(ch, serr);
		}
	}
	/* Process cold presence detection events */
	if ((istatus & AHCI_P_IX_CPD) && !reset)
		ahci_cpd_check_events(ch);
	/* Process command errors */
	if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
	    AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
		ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
		    >> AHCI_P_CMD_CCS_SHIFT;
		//device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
		//    __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
		//    serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
		port = -1;
		if (ch->fbs_enabled) {
			uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS);
			if (fbs & AHCI_P_FBS_SDE) {
				port = (fbs & AHCI_P_FBS_DWE)
				    >> AHCI_P_FBS_DWE_SHIFT;
			} else {
				for (i = 0; i < 16; i++) {
					if (ch->numrslotspd[i] == 0)
						continue;
					if (port == -1)
						port = i;
					else if (port != i) {
						port = -2;
						break;
					}
				}
			}
		}
		err = ch->rslots & cstatus;
	} else {
		ccs = 0;
		err = 0;
		port = -1;
	}
	/* Complete all successful commands. */
	ok = ch->rslots & ~cstatus;
	for (i = 0; i < ch->numslots; i++) {
		if ((ok >> i) & 1)
			ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE);
	}
	/* On error, complete the rest of commands with error statuses. */
	if (err) {
		if (ch->frozen) {
			union ccb *fccb = ch->frozen;
			ch->frozen = NULL;
			fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
			if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
				xpt_freeze_devq(fccb->ccb_h.path, 1);
				fccb->ccb_h.status |= CAM_DEV_QFRZN;
			}
			ahci_done(ch, fccb);
		}
		for (i = 0; i < ch->numslots; i++) {
			/* XXX: requests in loading state. */
			if (((err >> i) & 1) == 0)
				continue;
			if (port >= 0 &&
			    ch->slot[i].ccb->ccb_h.target_id != port)
				continue;
			if (istatus & AHCI_P_IX_TFE) {
				if (port != -2) {
					/* Task File Error */
					if (ch->numtslotspd[
					    ch->slot[i].ccb->ccb_h.target_id] == 0) {
						/* Untagged operation. */
						if (i == ccs)
							et = AHCI_ERR_TFE;
						else
							et = AHCI_ERR_INNOCENT;
					} else {
						/* Tagged operation.
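						 * The failing tag is not known here, so mark the
						 * command AHCI_ERR_NCQ and resolve it later with
						 * READ LOG EXT in ahci_issue_recovery().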
*/ 1296 et = AHCI_ERR_NCQ; 1297 } 1298 } else { 1299 et = AHCI_ERR_TFE; 1300 ch->fatalerr = 1; 1301 } 1302 } else if (istatus & AHCI_P_IX_IF) { 1303 if (ch->numtslots == 0 && i != ccs && port != -2) 1304 et = AHCI_ERR_INNOCENT; 1305 else 1306 et = AHCI_ERR_SATA; 1307 } else 1308 et = AHCI_ERR_INVALID; 1309 ahci_end_transaction(&ch->slot[i], et); 1310 } 1311 /* 1312 * We can't reinit port if there are some other 1313 * commands active, use resume to complete them. 1314 */ 1315 if (ch->rslots != 0 && !ch->recoverycmd) 1316 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); 1317 } 1318 /* Process NOTIFY events */ 1319 if (sntf) 1320 ahci_notify_events(ch, sntf); 1321 } 1322 1323 /* Must be called with channel locked. */ 1324 static int 1325 ahci_check_collision(struct ahci_channel *ch, union ccb *ccb) 1326 { 1327 int t = ccb->ccb_h.target_id; 1328 1329 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1330 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1331 /* Tagged command while we have no supported tag free. */ 1332 if (((~ch->oslots) & (0xffffffff >> (32 - 1333 ch->curr[t].tags))) == 0) 1334 return (1); 1335 /* If we have FBS */ 1336 if (ch->fbs_enabled) { 1337 /* Tagged command while untagged are active. */ 1338 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) 1339 return (1); 1340 } else { 1341 /* Tagged command while untagged are active. */ 1342 if (ch->numrslots != 0 && ch->numtslots == 0) 1343 return (1); 1344 /* Tagged command while tagged to other target is active. */ 1345 if (ch->numtslots != 0 && 1346 ch->taggedtarget != ccb->ccb_h.target_id) 1347 return (1); 1348 } 1349 } else { 1350 /* If we have FBS */ 1351 if (ch->fbs_enabled) { 1352 /* Untagged command while tagged are active. */ 1353 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) 1354 return (1); 1355 } else { 1356 /* Untagged command while tagged are active. */ 1357 if (ch->numrslots != 0 && ch->numtslots != 0) 1358 return (1); 1359 } 1360 } 1361 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1362 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { 1363 /* Atomic command while anything active. */ 1364 if (ch->numrslots != 0) 1365 return (1); 1366 } 1367 /* We have some atomic command running. */ 1368 if (ch->aslots != 0) 1369 return (1); 1370 return (0); 1371 } 1372 1373 /* Must be called with channel locked. */ 1374 static void 1375 ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb) 1376 { 1377 struct ahci_slot *slot; 1378 int tag, tags; 1379 1380 /* Choose empty slot. */ 1381 tags = ch->numslots; 1382 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1383 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) 1384 tags = ch->curr[ccb->ccb_h.target_id].tags; 1385 if (ch->lastslot + 1 < tags) 1386 tag = ffs(~(ch->oslots >> (ch->lastslot + 1))); 1387 else 1388 tag = 0; 1389 if (tag == 0 || tag + ch->lastslot >= tags) 1390 tag = ffs(~ch->oslots) - 1; 1391 else 1392 tag += ch->lastslot; 1393 ch->lastslot = tag; 1394 /* Occupy chosen slot. */ 1395 slot = &ch->slot[tag]; 1396 slot->ccb = ccb; 1397 /* Stop PM timer. */ 1398 if (ch->numrslots == 0 && ch->pm_level > 3) 1399 callout_stop(&ch->pm_timer); 1400 /* Update channel stats. 
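 * oslots marks the slot occupied; numrslots and the per-port/NCQ counters
 * below feed the collision checks in ahci_check_collision().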
*/ 1401 ch->oslots |= (1 << tag); 1402 ch->numrslots++; 1403 ch->numrslotspd[ccb->ccb_h.target_id]++; 1404 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1405 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1406 ch->numtslots++; 1407 ch->numtslotspd[ccb->ccb_h.target_id]++; 1408 ch->taggedtarget = ccb->ccb_h.target_id; 1409 } 1410 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1411 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) 1412 ch->aslots |= (1 << tag); 1413 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1414 slot->state = AHCI_SLOT_LOADING; 1415 bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, 1416 ahci_dmasetprd, slot, 0); 1417 } else { 1418 slot->dma.nsegs = 0; 1419 ahci_execute_transaction(slot); 1420 } 1421 } 1422 1423 /* Locked by busdma engine. */ 1424 static void 1425 ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1426 { 1427 struct ahci_slot *slot = arg; 1428 struct ahci_channel *ch = slot->ch; 1429 struct ahci_cmd_tab *ctp; 1430 struct ahci_dma_prd *prd; 1431 int i; 1432 1433 if (error) { 1434 device_printf(ch->dev, "DMA load error\n"); 1435 ahci_end_transaction(slot, AHCI_ERR_INVALID); 1436 return; 1437 } 1438 KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); 1439 /* Get a piece of the workspace for this request */ 1440 ctp = (struct ahci_cmd_tab *) 1441 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); 1442 /* Fill S/G table */ 1443 prd = &ctp->prd_tab[0]; 1444 for (i = 0; i < nsegs; i++) { 1445 prd[i].dba = htole64(segs[i].ds_addr); 1446 prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); 1447 } 1448 slot->dma.nsegs = nsegs; 1449 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, 1450 ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? 1451 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); 1452 ahci_execute_transaction(slot); 1453 } 1454 1455 /* Must be called with channel locked. */ 1456 static void 1457 ahci_execute_transaction(struct ahci_slot *slot) 1458 { 1459 struct ahci_channel *ch = slot->ch; 1460 struct ahci_cmd_tab *ctp; 1461 struct ahci_cmd_list *clp; 1462 union ccb *ccb = slot->ccb; 1463 int port = ccb->ccb_h.target_id & 0x0f; 1464 int fis_size, i, softreset; 1465 uint8_t *fis = ch->dma.rfis + 0x40; 1466 uint8_t val; 1467 1468 /* Get a piece of the workspace for this request */ 1469 ctp = (struct ahci_cmd_tab *) 1470 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); 1471 /* Setup the FIS for this request */ 1472 if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) { 1473 device_printf(ch->dev, "Setting up SATA FIS failed\n"); 1474 ahci_end_transaction(slot, AHCI_ERR_INVALID); 1475 return; 1476 } 1477 /* Setup the command list entry */ 1478 clp = (struct ahci_cmd_list *) 1479 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); 1480 clp->cmd_flags = htole16( 1481 (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | 1482 (ccb->ccb_h.func_code == XPT_SCSI_IO ? 1483 (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) | 1484 (fis_size / sizeof(u_int32_t)) | 1485 (port << 12)); 1486 clp->prd_length = htole16(slot->dma.nsegs); 1487 /* Special handling for Soft Reset command. 
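 * The first phase (SRST asserted) restarts the port through CLO and sets
 * the RESET/CLR_BUSY flags; the second phase pre-fills the FIS receive
 * area so that device readiness can be checked after polling.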
 */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
		if (ccb->ataio.cmd.control & ATA_A_RESET) {
			softreset = 1;
			/* Kick controller into sane state */
			ahci_stop(ch);
			ahci_clo(ch);
			ahci_start(ch, 0);
			clp->cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
		} else {
			softreset = 2;
			/* Prepare FIS receive area for check. */
			for (i = 0; i < 20; i++)
				fis[i] = 0xff;
		}
	} else
		softreset = 0;
	clp->bytecount = 0;
	clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET +
	    (AHCI_CT_SIZE * slot->slot));
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
	    BUS_DMASYNC_PREREAD);
	/* Set ACTIVE bit for NCQ commands. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot);
	}
	/* If FBS is enabled, set PMP port. */
	if (ch->fbs_enabled) {
		ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN |
		    (port << AHCI_P_FBS_DEV_SHIFT));
	}
	/* Issue command to the controller. */
	slot->state = AHCI_SLOT_RUNNING;
	ch->rslots |= (1 << slot->slot);
	ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
	/* Device reset commands don't interrupt. Poll them. */
	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
	    (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
		int count, timeout = ccb->ccb_h.timeout * 100;
		enum ahci_err_type et = AHCI_ERR_NONE;

		for (count = 0; count < timeout; count++) {
			DELAY(10);
			if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)))
				break;
			if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
			    softreset != 1) {
#if 0
				device_printf(ch->dev,
				    "Poll error on slot %d, TFD: %04x\n",
				    slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD));
#endif
				et = AHCI_ERR_TFE;
				break;
			}
			/* Workaround for ATI SB600/SB700 chipsets. */
			if (ccb->ccb_h.target_id == 15 &&
			    (ch->quirks & AHCI_Q_ATI_PMP_BUG) &&
			    (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) {
				et = AHCI_ERR_TIMEOUT;
				break;
			}
		}

		/*
		 * Marvell HBAs with non-RAID firmware do not wait for
		 * readiness after soft reset, so we have to wait here.
		 * Marvell RAIDs do not have this problem, but instead
		 * sometimes forget to update FIS receive area, breaking
		 * this wait.
		 */
		if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 &&
		    (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 &&
		    softreset == 2 && et == AHCI_ERR_NONE) {
			while ((val = fis[2]) & ATA_S_BUSY) {
				DELAY(10);
				if (count++ >= timeout)
					break;
			}
		}

		if (timeout && (count >= timeout)) {
			device_printf(ch->dev, "Poll timeout on slot %d port %d\n",
			    slot->slot, port);
			device_printf(ch->dev, "is %08x cs %08x ss %08x "
			    "rs %08x tfd %02x serr %08x cmd %08x\n",
			    ATA_INL(ch->r_mem, AHCI_P_IS),
			    ATA_INL(ch->r_mem, AHCI_P_CI),
			    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
			    ATA_INL(ch->r_mem, AHCI_P_TFD),
			    ATA_INL(ch->r_mem, AHCI_P_SERR),
			    ATA_INL(ch->r_mem, AHCI_P_CMD));
			et = AHCI_ERR_TIMEOUT;
		}

		/* Kick controller into sane state and enable FBS.
*/ 1587 if (softreset == 2) 1588 ch->eslots |= (1 << slot->slot); 1589 ahci_end_transaction(slot, et); 1590 return; 1591 } 1592 /* Start command execution timeout */ 1593 callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2, 1594 0, (timeout_t*)ahci_timeout, slot, 0); 1595 return; 1596 } 1597 1598 /* Must be called with channel locked. */ 1599 static void 1600 ahci_process_timeout(struct ahci_channel *ch) 1601 { 1602 int i; 1603 1604 mtx_assert(&ch->mtx, MA_OWNED); 1605 /* Handle the rest of commands. */ 1606 for (i = 0; i < ch->numslots; i++) { 1607 /* Do we have a running request on slot? */ 1608 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 1609 continue; 1610 ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); 1611 } 1612 } 1613 1614 /* Must be called with channel locked. */ 1615 static void 1616 ahci_rearm_timeout(struct ahci_channel *ch) 1617 { 1618 int i; 1619 1620 mtx_assert(&ch->mtx, MA_OWNED); 1621 for (i = 0; i < ch->numslots; i++) { 1622 struct ahci_slot *slot = &ch->slot[i]; 1623 1624 /* Do we have a running request on slot? */ 1625 if (slot->state < AHCI_SLOT_RUNNING) 1626 continue; 1627 if ((ch->toslots & (1 << i)) == 0) 1628 continue; 1629 callout_reset_sbt(&slot->timeout, 1630 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, 1631 (timeout_t*)ahci_timeout, slot, 0); 1632 } 1633 } 1634 1635 /* Locked by callout mechanism. */ 1636 static void 1637 ahci_timeout(struct ahci_slot *slot) 1638 { 1639 struct ahci_channel *ch = slot->ch; 1640 device_t dev = ch->dev; 1641 uint32_t sstatus; 1642 int ccs; 1643 int i; 1644 1645 /* Check for stale timeout. */ 1646 if (slot->state < AHCI_SLOT_RUNNING) 1647 return; 1648 1649 /* Check if slot was not being executed last time we checked. */ 1650 if (slot->state < AHCI_SLOT_EXECUTING) { 1651 /* Check if slot started executing. */ 1652 sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); 1653 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) 1654 >> AHCI_P_CMD_CCS_SHIFT; 1655 if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot || 1656 ch->fbs_enabled || ch->wrongccs) 1657 slot->state = AHCI_SLOT_EXECUTING; 1658 else if ((ch->rslots & (1 << ccs)) == 0) { 1659 ch->wrongccs = 1; 1660 slot->state = AHCI_SLOT_EXECUTING; 1661 } 1662 1663 callout_reset_sbt(&slot->timeout, 1664 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, 1665 (timeout_t*)ahci_timeout, slot, 0); 1666 return; 1667 } 1668 1669 device_printf(dev, "Timeout on slot %d port %d\n", 1670 slot->slot, slot->ccb->ccb_h.target_id & 0x0f); 1671 device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x " 1672 "serr %08x cmd %08x\n", 1673 ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), 1674 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, 1675 ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), 1676 ATA_INL(ch->r_mem, AHCI_P_CMD)); 1677 1678 /* Handle frozen command. */ 1679 if (ch->frozen) { 1680 union ccb *fccb = ch->frozen; 1681 ch->frozen = NULL; 1682 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; 1683 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { 1684 xpt_freeze_devq(fccb->ccb_h.path, 1); 1685 fccb->ccb_h.status |= CAM_DEV_QFRZN; 1686 } 1687 ahci_done(ch, fccb); 1688 } 1689 if (!ch->fbs_enabled && !ch->wrongccs) { 1690 /* Without FBS we know real timeout source. */ 1691 ch->fatalerr = 1; 1692 /* Handle command with timeout. */ 1693 ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT); 1694 /* Handle the rest of commands. */ 1695 for (i = 0; i < ch->numslots; i++) { 1696 /* Do we have a running request on slot? 
*/ 1697 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 1698 continue; 1699 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); 1700 } 1701 } else { 1702 /* With FBS we wait for other commands timeout and pray. */ 1703 if (ch->toslots == 0) 1704 xpt_freeze_simq(ch->sim, 1); 1705 ch->toslots |= (1 << slot->slot); 1706 if ((ch->rslots & ~ch->toslots) == 0) 1707 ahci_process_timeout(ch); 1708 else 1709 device_printf(dev, " ... waiting for slots %08x\n", 1710 ch->rslots & ~ch->toslots); 1711 } 1712 } 1713 1714 /* Must be called with channel locked. */ 1715 static void 1716 ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et) 1717 { 1718 struct ahci_channel *ch = slot->ch; 1719 union ccb *ccb = slot->ccb; 1720 struct ahci_cmd_list *clp; 1721 int lastto; 1722 uint32_t sig; 1723 1724 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, 1725 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1726 clp = (struct ahci_cmd_list *) 1727 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); 1728 /* Read result registers to the result struct 1729 * May be incorrect if several commands finished same time, 1730 * so read only when sure or have to. 1731 */ 1732 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1733 struct ata_res *res = &ccb->ataio.res; 1734 1735 if ((et == AHCI_ERR_TFE) || 1736 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { 1737 u_int8_t *fis = ch->dma.rfis + 0x40; 1738 1739 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, 1740 BUS_DMASYNC_POSTREAD); 1741 if (ch->fbs_enabled) { 1742 fis += ccb->ccb_h.target_id * 256; 1743 res->status = fis[2]; 1744 res->error = fis[3]; 1745 } else { 1746 uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD); 1747 1748 res->status = tfd; 1749 res->error = tfd >> 8; 1750 } 1751 res->lba_low = fis[4]; 1752 res->lba_mid = fis[5]; 1753 res->lba_high = fis[6]; 1754 res->device = fis[7]; 1755 res->lba_low_exp = fis[8]; 1756 res->lba_mid_exp = fis[9]; 1757 res->lba_high_exp = fis[10]; 1758 res->sector_count = fis[12]; 1759 res->sector_count_exp = fis[13]; 1760 1761 /* 1762 * Some weird controllers do not return signature in 1763 * FIS receive area. Read it from PxSIG register. 1764 */ 1765 if ((ch->quirks & AHCI_Q_ALTSIG) && 1766 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1767 (ccb->ataio.cmd.control & ATA_A_RESET) == 0) { 1768 sig = ATA_INL(ch->r_mem, AHCI_P_SIG); 1769 res->lba_high = sig >> 24; 1770 res->lba_mid = sig >> 16; 1771 res->lba_low = sig >> 8; 1772 res->sector_count = sig; 1773 } 1774 } else 1775 bzero(res, sizeof(*res)); 1776 if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 && 1777 (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1778 (ch->quirks & AHCI_Q_NOCOUNT) == 0) { 1779 ccb->ataio.resid = 1780 ccb->ataio.dxfer_len - le32toh(clp->bytecount); 1781 } 1782 } else { 1783 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1784 (ch->quirks & AHCI_Q_NOCOUNT) == 0) { 1785 ccb->csio.resid = 1786 ccb->csio.dxfer_len - le32toh(clp->bytecount); 1787 } 1788 } 1789 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1790 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, 1791 (ccb->ccb_h.flags & CAM_DIR_IN) ? 1792 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1793 bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); 1794 } 1795 if (et != AHCI_ERR_NONE) 1796 ch->eslots |= (1 << slot->slot); 1797 /* In case of error, freeze device for proper recovery. 
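 * CAM_DEV_QFRZN tells CAM that the device queue was frozen; the
 * peripheral driver releases it once the failed request has been handled.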
*/ 1798 if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) && 1799 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 1800 xpt_freeze_devq(ccb->ccb_h.path, 1); 1801 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1802 } 1803 /* Set proper result status. */ 1804 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1805 switch (et) { 1806 case AHCI_ERR_NONE: 1807 ccb->ccb_h.status |= CAM_REQ_CMP; 1808 if (ccb->ccb_h.func_code == XPT_SCSI_IO) 1809 ccb->csio.scsi_status = SCSI_STATUS_OK; 1810 break; 1811 case AHCI_ERR_INVALID: 1812 ch->fatalerr = 1; 1813 ccb->ccb_h.status |= CAM_REQ_INVALID; 1814 break; 1815 case AHCI_ERR_INNOCENT: 1816 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1817 break; 1818 case AHCI_ERR_TFE: 1819 case AHCI_ERR_NCQ: 1820 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 1821 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1822 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1823 } else { 1824 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; 1825 } 1826 break; 1827 case AHCI_ERR_SATA: 1828 ch->fatalerr = 1; 1829 if (!ch->recoverycmd) { 1830 xpt_freeze_simq(ch->sim, 1); 1831 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1832 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1833 } 1834 ccb->ccb_h.status |= CAM_UNCOR_PARITY; 1835 break; 1836 case AHCI_ERR_TIMEOUT: 1837 if (!ch->recoverycmd) { 1838 xpt_freeze_simq(ch->sim, 1); 1839 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1840 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1841 } 1842 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 1843 break; 1844 default: 1845 ch->fatalerr = 1; 1846 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1847 } 1848 /* Free slot. */ 1849 ch->oslots &= ~(1 << slot->slot); 1850 ch->rslots &= ~(1 << slot->slot); 1851 ch->aslots &= ~(1 << slot->slot); 1852 slot->state = AHCI_SLOT_EMPTY; 1853 slot->ccb = NULL; 1854 /* Update channel stats. */ 1855 ch->numrslots--; 1856 ch->numrslotspd[ccb->ccb_h.target_id]--; 1857 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1858 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1859 ch->numtslots--; 1860 ch->numtslotspd[ccb->ccb_h.target_id]--; 1861 } 1862 /* Cancel timeout state if request completed normally. */ 1863 if (et != AHCI_ERR_TIMEOUT) { 1864 lastto = (ch->toslots == (1 << slot->slot)); 1865 ch->toslots &= ~(1 << slot->slot); 1866 if (lastto) 1867 xpt_release_simq(ch->sim, TRUE); 1868 } 1869 /* If it was first request of reset sequence and there is no error, 1870 * proceed to second request. */ 1871 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1872 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1873 (ccb->ataio.cmd.control & ATA_A_RESET) && 1874 et == AHCI_ERR_NONE) { 1875 ccb->ataio.cmd.control &= ~ATA_A_RESET; 1876 ahci_begin_transaction(ch, ccb); 1877 return; 1878 } 1879 /* If it was our READ LOG command - process it. */ 1880 if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { 1881 ahci_process_read_log(ch, ccb); 1882 /* If it was our REQUEST SENSE command - process it. */ 1883 } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { 1884 ahci_process_request_sense(ch, ccb); 1885 /* If it was NCQ or ATAPI command error, put result on hold. */ 1886 } else if (et == AHCI_ERR_NCQ || 1887 ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && 1888 (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { 1889 ch->hold[slot->slot] = ccb; 1890 ch->numhslots++; 1891 } else 1892 ahci_done(ch, ccb); 1893 /* If we have no other active commands, ... */ 1894 if (ch->rslots == 0) { 1895 /* if there was fatal error - reset port. */ 1896 if (ch->toslots != 0 || ch->fatalerr) { 1897 ahci_reset(ch); 1898 } else { 1899 /* if we have slots in error, we can reinit port. 
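			 * CLO clears a BSY/DRQ left behind by the failed command
			 * before the port is restarted.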
			/* if we have slots in error, we can reinit port. */
			if (ch->eslots != 0) {
				ahci_stop(ch);
				ahci_clo(ch);
				ahci_start(ch, 1);
			}
			/* if there are commands on hold, we can do READ LOG. */
			if (!ch->recoverycmd && ch->numhslots)
				ahci_issue_recovery(ch);
		}
	/* If all the rest of commands are in timeout - give them a chance. */
	} else if ((ch->rslots & ~ch->toslots) == 0 &&
	    et != AHCI_ERR_TIMEOUT)
		ahci_rearm_timeout(ch);
	/* Unfreeze frozen command. */
	if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) {
		union ccb *fccb = ch->frozen;
		ch->frozen = NULL;
		ahci_begin_transaction(ch, fccb);
		xpt_release_simq(ch->sim, TRUE);
	}
	/* Start PM timer. */
	if (ch->numrslots == 0 && ch->pm_level > 3 &&
	    (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) {
		callout_schedule(&ch->pm_timer,
		    (ch->pm_level == 4) ? hz / 1000 : hz / 8);
	}
}

static void
ahci_issue_recovery(struct ahci_channel *ch)
{
	union ccb *ccb;
	struct ccb_ataio *ataio;
	struct ccb_scsiio *csio;
	int i;

	/* Find some held command. */
	for (i = 0; i < ch->numslots; i++) {
		if (ch->hold[i])
			break;
	}
	ccb = xpt_alloc_ccb_nowait();
	if (ccb == NULL) {
		device_printf(ch->dev, "Unable to allocate recovery command\n");
completeall:
		/* We can't do anything -- complete held commands. */
		for (i = 0; i < ch->numslots; i++) {
			if (ch->hold[i] == NULL)
				continue;
			ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
			ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL;
			ahci_done(ch, ch->hold[i]);
			ch->hold[i] = NULL;
			ch->numhslots--;
		}
		ahci_reset(ch);
		return;
	}
	ccb->ccb_h = ch->hold[i]->ccb_h;	/* Reuse old header. */
	if (ccb->ccb_h.func_code == XPT_ATA_IO) {
		/* READ LOG */
		ccb->ccb_h.recovery_type = RECOVERY_READ_LOG;
		ccb->ccb_h.func_code = XPT_ATA_IO;
		ccb->ccb_h.flags = CAM_DIR_IN;
		ccb->ccb_h.timeout = 1000;	/* 1s should be enough. */
		ataio = &ccb->ataio;
		ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT);
		if (ataio->data_ptr == NULL) {
			xpt_free_ccb(ccb);
			device_printf(ch->dev,
			    "Unable to allocate memory for READ LOG command\n");
			goto completeall;
		}
		ataio->dxfer_len = 512;
		bzero(&ataio->cmd, sizeof(ataio->cmd));
		ataio->cmd.flags = CAM_ATAIO_48BIT;
		ataio->cmd.command = 0x2F;	/* READ LOG EXT */
		ataio->cmd.sector_count = 1;
		ataio->cmd.sector_count_exp = 0;
		ataio->cmd.lba_low = 0x10;
		ataio->cmd.lba_mid = 0;
		ataio->cmd.lba_mid_exp = 0;
	} else {
		/* REQUEST SENSE */
		ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE;
		ccb->ccb_h.recovery_slot = i;
		ccb->ccb_h.func_code = XPT_SCSI_IO;
		ccb->ccb_h.flags = CAM_DIR_IN;
		ccb->ccb_h.status = 0;
		ccb->ccb_h.timeout = 1000;	/* 1s should be enough. */
		csio = &ccb->csio;
		csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data;
		csio->dxfer_len = ch->hold[i]->csio.sense_len;
		csio->cdb_len = 6;
		bzero(&csio->cdb_io, sizeof(csio->cdb_io));
		csio->cdb_io.cdb_bytes[0] = 0x03;
		csio->cdb_io.cdb_bytes[4] = csio->dxfer_len;
	}
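	/*
	 * Only one recovery command may be outstanding at a time;
	 * recoverycmd marks it and the SIM queue is frozen below, so no
	 * new I/O is started until READ LOG EXT / REQUEST SENSE completes.
	 */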
	/* Freeze SIM while doing recovery. */
	ch->recoverycmd = 1;
	xpt_freeze_simq(ch->sim, 1);
	ahci_begin_transaction(ch, ccb);
}

static void
ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb)
{
	uint8_t *data;
	struct ata_res *res;
	int i;

	ch->recoverycmd = 0;

	data = ccb->ataio.data_ptr;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
	    (data[0] & 0x80) == 0) {
		for (i = 0; i < ch->numslots; i++) {
			if (!ch->hold[i])
				continue;
			if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
				continue;
			if ((data[0] & 0x1F) == i) {
				res = &ch->hold[i]->ataio.res;
				res->status = data[2];
				res->error = data[3];
				res->lba_low = data[4];
				res->lba_mid = data[5];
				res->lba_high = data[6];
				res->device = data[7];
				res->lba_low_exp = data[8];
				res->lba_mid_exp = data[9];
				res->lba_high_exp = data[10];
				res->sector_count = data[12];
				res->sector_count_exp = data[13];
			} else {
				ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
				ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ;
			}
			ahci_done(ch, ch->hold[i]);
			ch->hold[i] = NULL;
			ch->numhslots--;
		}
	} else {
		if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
			device_printf(ch->dev, "Error while READ LOG EXT\n");
		else if ((data[0] & 0x80) == 0) {
			device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n");
		}
		for (i = 0; i < ch->numslots; i++) {
			if (!ch->hold[i])
				continue;
			if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO)
				continue;
			ahci_done(ch, ch->hold[i]);
			ch->hold[i] = NULL;
			ch->numhslots--;
		}
	}
	free(ccb->ataio.data_ptr, M_AHCI);
	xpt_free_ccb(ccb);
	xpt_release_simq(ch->sim, TRUE);
}

static void
ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb)
{
	int i;

	ch->recoverycmd = 0;

	i = ccb->ccb_h.recovery_slot;
	if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) {
		ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID;
	} else {
		ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
		ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL;
	}
	ahci_done(ch, ch->hold[i]);
	ch->hold[i] = NULL;
	ch->numhslots--;
	xpt_free_ccb(ccb);
	xpt_release_simq(ch->sim, TRUE);
}

static void
ahci_start(struct ahci_channel *ch, int fbs)
{
	u_int32_t cmd;

	/* Clear SATA error register */
	ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF);
	/* Clear any interrupts pending on this channel */
	ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF);
	/* Configure FIS-based switching if supported. */
	if (ch->chcaps & AHCI_P_CMD_FBSCP) {
		ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0;
		ATA_OUTL(ch->r_mem, AHCI_P_FBS,
		    ch->fbs_enabled ? AHCI_P_FBS_EN : 0);
	}
	/* Start operations on this channel */
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	cmd &= ~AHCI_P_CMD_PMA;
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST |
	    (ch->pm_present ? AHCI_P_CMD_PMA : 0));
}

static void
ahci_stop(struct ahci_channel *ch)
{
	u_int32_t cmd;
	int timeout;

	/* Kill all activity on this channel */
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST);
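	/*
	 * Per the AHCI spec the HBA clears PxCMD.CR once the command list
	 * DMA engine goes idle after PxCMD.ST has been cleared; the loop
	 * below polls every 10us for up to roughly 500ms.
	 */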
	/* Wait for activity stop. */
	timeout = 0;
	do {
		DELAY(10);
		if (timeout++ > 50000) {
			device_printf(ch->dev, "stopping AHCI engine failed\n");
			break;
		}
	} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR);
	ch->eslots = 0;
}

static void
ahci_clo(struct ahci_channel *ch)
{
	u_int32_t cmd;
	int timeout;

	/* Issue Command List Override if supported */
	if (ch->caps & AHCI_CAP_SCLO) {
		cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
		cmd |= AHCI_P_CMD_CLO;
		ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd);
		timeout = 0;
		do {
			DELAY(10);
			if (timeout++ > 50000) {
				device_printf(ch->dev, "executing CLO failed\n");
				break;
			}
		} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO);
	}
}

static void
ahci_stop_fr(struct ahci_channel *ch)
{
	u_int32_t cmd;
	int timeout;

	/* Kill all FIS reception on this channel */
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE);
	/* Wait for FIS reception stop. */
	timeout = 0;
	do {
		DELAY(10);
		if (timeout++ > 50000) {
			device_printf(ch->dev, "stopping AHCI FR engine failed\n");
			break;
		}
	} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR);
}

static void
ahci_start_fr(struct ahci_channel *ch)
{
	u_int32_t cmd;

	/* Start FIS reception on this channel */
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE);
}

static int
ahci_wait_ready(struct ahci_channel *ch, int t, int t0)
{
	int timeout = 0;
	uint32_t val;

	while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) &
	    (ATA_S_BUSY | ATA_S_DRQ)) {
		if (timeout > t) {
			if (t != 0) {
				device_printf(ch->dev,
				    "AHCI reset: device not ready after %dms "
				    "(tfd = %08x)\n",
				    MAX(t, 0) + t0, val);
			}
			return (EBUSY);
		}
		DELAY(1000);
		timeout++;
	}
	if (bootverbose)
		device_printf(ch->dev, "AHCI reset: device ready after %dms\n",
		    timeout + t0);
	return (0);
}

static void
ahci_reset_to(void *arg)
{
	struct ahci_channel *ch = arg;

	if (ch->resetting == 0)
		return;
	ch->resetting--;
	if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0,
	    (310 - ch->resetting) * 100) == 0) {
		ch->resetting = 0;
		ahci_start(ch, 1);
		xpt_release_simq(ch->sim, TRUE);
		return;
	}
	if (ch->resetting == 0) {
		ahci_clo(ch);
		ahci_start(ch, 1);
		xpt_release_simq(ch->sim, TRUE);
		return;
	}
	callout_schedule(&ch->reset_timer, hz / 10);
}

static void
ahci_reset(struct ahci_channel *ch)
{
	struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev));
	int i;

	xpt_freeze_simq(ch->sim, 1);
	if (bootverbose)
		device_printf(ch->dev, "AHCI reset...\n");
	/* Forget about previous reset. */
	if (ch->resetting) {
		ch->resetting = 0;
		callout_stop(&ch->reset_timer);
		xpt_release_simq(ch->sim, TRUE);
	}
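	/*
	 * From here on the port is torn down completely: the deferred
	 * (frozen) command and all running slots are returned to CAM, held
	 * commands are completed, and the PHY is reset before interrupts
	 * are re-enabled.
	 */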
	/* Requeue frozen command. */
	if (ch->frozen) {
		union ccb *fccb = ch->frozen;
		ch->frozen = NULL;
		fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
		if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
			xpt_freeze_devq(fccb->ccb_h.path, 1);
			fccb->ccb_h.status |= CAM_DEV_QFRZN;
		}
		ahci_done(ch, fccb);
	}
	/* Kill the engine and requeue all running commands. */
	ahci_stop(ch);
	for (i = 0; i < ch->numslots; i++) {
		/* Do we have a running request on slot? */
		if (ch->slot[i].state < AHCI_SLOT_RUNNING)
			continue;
		/* XXX; Commands in loading state. */
		ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
	}
	for (i = 0; i < ch->numslots; i++) {
		if (!ch->hold[i])
			continue;
		ahci_done(ch, ch->hold[i]);
		ch->hold[i] = NULL;
		ch->numhslots--;
	}
	if (ch->toslots != 0)
		xpt_release_simq(ch->sim, TRUE);
	ch->eslots = 0;
	ch->toslots = 0;
	ch->wrongccs = 0;
	ch->fatalerr = 0;
	/* Tell the XPT about the event */
	xpt_async(AC_BUS_RESET, ch->path, NULL);
	/* Disable port interrupts */
	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
	/* Reset and reconnect PHY. */
	if (!ahci_sata_phy_reset(ch)) {
		if (bootverbose)
			device_printf(ch->dev,
			    "AHCI reset: device not found\n");
		ch->devices = 0;
		/* Enable wanted port interrupts */
		ATA_OUTL(ch->r_mem, AHCI_P_IE,
		    (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
		     AHCI_P_IX_PRC | AHCI_P_IX_PC));
		xpt_release_simq(ch->sim, TRUE);
		return;
	}
	if (bootverbose)
		device_printf(ch->dev, "AHCI reset: device found\n");
	/* Wait for the busy status to clear. */
	if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) {
		if (dumping)
			ahci_clo(ch);
		else
			ch->resetting = 310;
	}
	ch->devices = 1;
	/* Enable wanted port interrupts */
	ATA_OUTL(ch->r_mem, AHCI_P_IE,
	    (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
	     AHCI_P_IX_TFE | AHCI_P_IX_HBF |
	     AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF |
	     ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC |
	     AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) |
	     AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR)));
	if (ch->resetting)
		callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch);
	else {
		ahci_start(ch, 1);
		xpt_release_simq(ch->sim, TRUE);
	}
}

static int
ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag)
{
	u_int8_t *fis = &ctp->cfis[0];

	bzero(fis, 20);
	fis[0] = 0x27;			/* host to device */
	fis[1] = (ccb->ccb_h.target_id & 0x0f);
	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
		fis[1] |= 0x80;
		fis[2] = ATA_PACKET_CMD;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
			fis[3] = ATA_F_DMA;
		else {
			fis[5] = ccb->csio.dxfer_len;
			fis[6] = ccb->csio.dxfer_len >> 8;
		}
		fis[7] = ATA_D_LBA;
		fis[15] = ATA_A_4BIT;
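		/*
		 * ATAPI: the SCSI CDB is copied into the ACMD area of the
		 * command table right after this FIS and padded with zeroes.
		 */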
		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
		    ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes,
		    ctp->acmd, ccb->csio.cdb_len);
		bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len);
	} else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) {
		fis[1] |= 0x80;
		fis[2] = ccb->ataio.cmd.command;
		fis[3] = ccb->ataio.cmd.features;
		fis[4] = ccb->ataio.cmd.lba_low;
		fis[5] = ccb->ataio.cmd.lba_mid;
		fis[6] = ccb->ataio.cmd.lba_high;
		fis[7] = ccb->ataio.cmd.device;
		fis[8] = ccb->ataio.cmd.lba_low_exp;
		fis[9] = ccb->ataio.cmd.lba_mid_exp;
		fis[10] = ccb->ataio.cmd.lba_high_exp;
		fis[11] = ccb->ataio.cmd.features_exp;
		if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
			fis[12] = tag << 3;
			fis[13] = 0;
		} else {
			fis[12] = ccb->ataio.cmd.sector_count;
			fis[13] = ccb->ataio.cmd.sector_count_exp;
		}
		fis[15] = ATA_A_4BIT;
	} else {
		fis[15] = ccb->ataio.cmd.control;
	}
	return (20);
}

static int
ahci_sata_connect(struct ahci_channel *ch)
{
	u_int32_t status;
	int timeout, found = 0;

	/* Wait up to 100ms for "connect well" */
	for (timeout = 0; timeout < 1000 ; timeout++) {
		status = ATA_INL(ch->r_mem, AHCI_P_SSTS);
		if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE)
			found = 1;
		if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) &&
		    ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) &&
		    ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE))
			break;
		if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) {
			if (bootverbose) {
				device_printf(ch->dev, "SATA offline status=%08x\n",
				    status);
			}
			return (0);
		}
		if (found == 0 && timeout >= 100)
			break;
		DELAY(100);
	}
	if (timeout >= 1000 || !found) {
		if (bootverbose) {
			device_printf(ch->dev,
			    "SATA connect timeout time=%dus status=%08x\n",
			    timeout * 100, status);
		}
		return (0);
	}
	if (bootverbose) {
		device_printf(ch->dev, "SATA connect time=%dus status=%08x\n",
		    timeout * 100, status);
	}
	/* Clear SATA error register */
	ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff);
	return (1);
}

static int
ahci_sata_phy_reset(struct ahci_channel *ch)
{
	int sata_rev;
	uint32_t val;

	if (ch->listening) {
		val = ATA_INL(ch->r_mem, AHCI_P_CMD);
		val |= AHCI_P_CMD_SUD;
		ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
		ch->listening = 0;
	}
	sata_rev = ch->user[ch->pm_present ? 15 : 0].revision;
	if (sata_rev == 1)
		val = ATA_SC_SPD_SPEED_GEN1;
	else if (sata_rev == 2)
		val = ATA_SC_SPD_SPEED_GEN2;
	else if (sata_rev == 3)
		val = ATA_SC_SPD_SPEED_GEN3;
	else
		val = 0;
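	/*
	 * COMRESET sequence: writing DET=1 to PxSCTL forces the interface
	 * into the reset state; after holding it there for at least 1ms
	 * (the SATA spec minimum) DET is cleared so the link can
	 * renegotiate at the speed capped by the SPD value selected above.
	 */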
	ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
	    ATA_SC_DET_RESET | val |
	    ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER);
	DELAY(1000);
	ATA_OUTL(ch->r_mem, AHCI_P_SCTL,
	    ATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 :
	    (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER)));
	if (!ahci_sata_connect(ch)) {
		if (ch->caps & AHCI_CAP_SSS) {
			val = ATA_INL(ch->r_mem, AHCI_P_CMD);
			val &= ~AHCI_P_CMD_SUD;
			ATA_OUTL(ch->r_mem, AHCI_P_CMD, val);
			ch->listening = 1;
		} else if (ch->pm_level > 0)
			ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE);
		return (0);
	}
	return (1);
}

static int
ahci_check_ids(struct ahci_channel *ch, union ccb *ccb)
{

	if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) {
		ccb->ccb_h.status = CAM_TID_INVALID;
		ahci_done(ch, ccb);
		return (-1);
	}
	if (ccb->ccb_h.target_lun != 0) {
		ccb->ccb_h.status = CAM_LUN_INVALID;
		ahci_done(ch, ccb);
		return (-1);
	}
	return (0);
}

static void
ahciaction(struct cam_sim *sim, union ccb *ccb)
{
	struct ahci_channel *ch;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n",
	    ccb->ccb_h.func_code));

	ch = (struct ahci_channel *)cam_sim_softc(sim);
	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ATA_IO:	/* Execute the requested I/O operation */
	case XPT_SCSI_IO:
		if (ahci_check_ids(ch, ccb))
			return;
		if (ch->devices == 0 ||
		    (ch->pm_present == 0 &&
		     ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) {
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}
		ccb->ccb_h.recovery_type = RECOVERY_NONE;
		/* Check for command collision. */
		if (ahci_check_collision(ch, ccb)) {
			/* Freeze command. */
			ch->frozen = ccb;
			/* We have only one frozen slot, so freeze simq also. */
			xpt_freeze_simq(ch->sim, 1);
			return;
		}
		ahci_begin_transaction(ch, ccb);
		return;
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_TARGET_IO:		/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ahci_device *d;

		if (ahci_check_ids(ch, ccb))
			return;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
			d->revision = cts->xport_specific.sata.revision;
		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE)
			d->mode = cts->xport_specific.sata.mode;
		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
			d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS)
			d->tags = min(ch->numslots, cts->xport_specific.sata.tags);
		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM)
			ch->pm_present = cts->xport_specific.sata.pm_present;
		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
			d->atapi = cts->xport_specific.sata.atapi;
		if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
			d->caps = cts->xport_specific.sata.caps;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	/* Get default/user set transfer settings for the target */
	{
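		/*
		 * For CURRENT_SETTINGS of the directly attached device the
		 * link revision and capabilities are derived from the live
		 * PxSSTS value and channel caps rather than from the stored
		 * per-target values used for USER_SETTINGS.
		 */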
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ahci_device *d;
		uint32_t status;

		if (ahci_check_ids(ch, ccb))
			return;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		cts->protocol = PROTO_UNSPECIFIED;
		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cts->transport = XPORT_SATA;
		cts->transport_version = XPORT_VERSION_UNSPECIFIED;
		cts->proto_specific.valid = 0;
		cts->xport_specific.sata.valid = 0;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS &&
		    (ccb->ccb_h.target_id == 15 ||
		     (ccb->ccb_h.target_id == 0 && !ch->pm_present))) {
			status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK;
			if (status & 0x0f0) {
				cts->xport_specific.sata.revision =
				    (status & 0x0f0) >> 4;
				cts->xport_specific.sata.valid |=
				    CTS_SATA_VALID_REVISION;
			}
			cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D;
			if (ch->pm_level) {
				if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC))
					cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ;
				if (ch->caps2 & AHCI_CAP2_APST)
					cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST;
			}
			if ((ch->caps & AHCI_CAP_SNCQ) &&
			    (ch->quirks & AHCI_Q_NOAA) == 0)
				cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA;
			cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN;
			cts->xport_specific.sata.caps &=
			    ch->user[ccb->ccb_h.target_id].caps;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
		} else {
			cts->xport_specific.sata.revision = d->revision;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
			cts->xport_specific.sata.caps = d->caps;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
		}
		cts->xport_specific.sata.mode = d->mode;
		cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
		cts->xport_specific.sata.bytecount = d->bytecount;
		cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
		cts->xport_specific.sata.pm_present = ch->pm_present;
		cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM;
		cts->xport_specific.sata.tags = d->tags;
		cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS;
		cts->xport_specific.sata.atapi = d->atapi;
		cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		ahci_reset(ch);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		cpi->version_num = 1; /* XXX??? */
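		/*
		 * Report HBA capabilities to CAM: NCQ support maps to tagged
		 * queueing, SPM to SATA port multiplier support, and the
		 * maximum target number (0 or 15) depends on SPM as well.
		 */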
		cpi->hba_inquiry = PI_SDTR_ABLE;
		if (ch->caps & AHCI_CAP_SNCQ)
			cpi->hba_inquiry |= PI_TAG_ABLE;
		if (ch->caps & AHCI_CAP_SPM)
			cpi->hba_inquiry |= PI_SATAPM;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED;
		cpi->hba_eng_cnt = 0;
		if (ch->caps & AHCI_CAP_SPM)
			cpi->max_target = 15;
		else
			cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AHCI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SATA;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->protocol = PROTO_ATA;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->maxio = MAXPHYS;
		/* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */
		if (ch->quirks & AHCI_Q_MAXIO_64K)
			cpi->maxio = min(cpi->maxio, 128 * 512);
		cpi->hba_vendor = ch->vendorid;
		cpi->hba_device = ch->deviceid;
		cpi->hba_subvendor = ch->subvendorid;
		cpi->hba_subdevice = ch->subdeviceid;
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	ahci_done(ch, ccb);
}

static void
ahcipoll(struct cam_sim *sim)
{
	struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim);
	uint32_t istatus;

	/* Read interrupt statuses and process if any. */
	istatus = ATA_INL(ch->r_mem, AHCI_P_IS);
	if (istatus != 0)
		ahci_ch_intr_main(ch, istatus);
	if (ch->resetting != 0 &&
	    (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) {
		ch->resetpolldiv = 1000;
		ahci_reset_to(ch);
	}
}
MODULE_VERSION(ahci, 1);
MODULE_DEPEND(ahci, cam, 1, 1, 1);