1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/module.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/conf.h> 38 #include <sys/endian.h> 39 #include <sys/malloc.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/sysctl.h> 43 #include <machine/stdarg.h> 44 #include <machine/resource.h> 45 #include <machine/bus.h> 46 #include <sys/rman.h> 47 #include "ahci.h" 48 49 #include <cam/cam.h> 50 #include <cam/cam_ccb.h> 51 #include <cam/cam_sim.h> 52 #include <cam/cam_xpt_sim.h> 53 #include <cam/cam_debug.h> 54 55 /* local prototypes */ 56 static void ahci_intr(void *data); 57 static void ahci_intr_one(void *data); 58 static void ahci_intr_one_edge(void *data); 59 static int ahci_ch_init(device_t dev); 60 static int ahci_ch_deinit(device_t dev); 61 static int ahci_ch_suspend(device_t dev); 62 static int ahci_ch_resume(device_t dev); 63 static void ahci_ch_pm(void *arg); 64 static void ahci_ch_intr(void *arg); 65 static void ahci_ch_intr_direct(void *arg); 66 static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); 67 static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb); 68 static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 69 static void ahci_execute_transaction(struct ahci_slot *slot); 70 static void ahci_timeout(struct ahci_slot *slot); 71 static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); 72 static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); 73 static void ahci_dmainit(device_t dev); 74 static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); 75 static void ahci_dmafini(device_t dev); 76 static void ahci_slotsalloc(device_t dev); 77 static void ahci_slotsfree(device_t dev); 78 static void ahci_reset(struct ahci_channel *ch); 79 static void ahci_start(struct ahci_channel *ch, int fbs); 80 static void ahci_stop(struct 
    ahci_channel *ch);
static void ahci_clo(struct ahci_channel *ch);
static void ahci_start_fr(struct ahci_channel *ch);
static void ahci_stop_fr(struct ahci_channel *ch);
static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr);
static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val);

static int ahci_sata_connect(struct ahci_channel *ch);
static int ahci_sata_phy_reset(struct ahci_channel *ch);
static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0);

static void ahci_issue_recovery(struct ahci_channel *ch);
static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb);
static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb);

static void ahciaction(struct cam_sim *sim, union ccb *ccb);
static void ahcipoll(struct cam_sim *sim);

static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers");

#define recovery_type		spriv_field0
#define RECOVERY_NONE		0
#define RECOVERY_READ_LOG	1
#define RECOVERY_REQUEST_SENSE	2
#define recovery_slot		spriv_field1

static uint32_t
ahci_ch_detval(struct ahci_channel *ch, uint32_t val)
{

	return ch->disablephy ? ATA_SC_DET_DISABLE : val;
}

int
ahci_ctlr_setup(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	/* Clear interrupts */
	ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS));
	/* Configure CCC */
	if (ctlr->ccc) {
		ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI));
		ATA_OUTL(ctlr->r_mem, AHCI_CCCC,
		    (ctlr->ccc << AHCI_CCCC_TV_SHIFT) |
		    (4 << AHCI_CCCC_CC_SHIFT) |
		    AHCI_CCCC_EN);
		ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) &
		    AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT;
		if (bootverbose) {
			device_printf(dev,
			    "CCC with %dms/4cmd enabled on vector %d\n",
			    ctlr->ccc, ctlr->cccv);
		}
	}
	/* Enable AHCI interrupts */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC,
	    ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE);
	return (0);
}

int
ahci_ctlr_reset(device_t dev)
{
	struct ahci_controller *ctlr = device_get_softc(dev);
	int timeout;

	/* Enable AHCI mode */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);
	/* Reset AHCI controller */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR);
	for (timeout = 1000; timeout > 0; timeout--) {
		DELAY(1000);
		if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0)
			break;
	}
	if (timeout == 0) {
		device_printf(dev, "AHCI controller reset failure\n");
		return (ENXIO);
	}
	/* Re-enable AHCI mode */
	ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE);

	if (ctlr->quirks & AHCI_Q_RESTORE_CAP) {
		/*
		 * Restore the capability field.
		 * This is a write to a read-only register to restore its
		 * state. On fully standard-compliant hardware this is not
		 * needed and this operation shall not take place. See
		 * ahci_pci.c for platforms using this quirk.
169 */ 170 ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps); 171 } 172 173 return (0); 174 } 175 176 177 int 178 ahci_attach(device_t dev) 179 { 180 struct ahci_controller *ctlr = device_get_softc(dev); 181 int error, i, speed, unit; 182 uint32_t u, version; 183 device_t child; 184 185 ctlr->dev = dev; 186 ctlr->ccc = 0; 187 resource_int_value(device_get_name(dev), 188 device_get_unit(dev), "ccc", &ctlr->ccc); 189 mtx_init(&ctlr->ch_mtx, "AHCI channels lock", NULL, MTX_DEF); 190 191 /* Setup our own memory management for channels. */ 192 ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); 193 ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); 194 ctlr->sc_iomem.rm_type = RMAN_ARRAY; 195 ctlr->sc_iomem.rm_descr = "I/O memory addresses"; 196 if ((error = rman_init(&ctlr->sc_iomem)) != 0) { 197 ahci_free_mem(dev); 198 return (error); 199 } 200 if ((error = rman_manage_region(&ctlr->sc_iomem, 201 rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { 202 ahci_free_mem(dev); 203 rman_fini(&ctlr->sc_iomem); 204 return (error); 205 } 206 /* Get the HW capabilities */ 207 version = ATA_INL(ctlr->r_mem, AHCI_VS); 208 ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); 209 if (version >= 0x00010200) 210 ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); 211 if (ctlr->caps & AHCI_CAP_EMS) 212 ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); 213 214 if (ctlr->quirks & AHCI_Q_FORCE_PI) { 215 /* 216 * Enable ports. 217 * The spec says that BIOS sets up bits corresponding to 218 * available ports. On platforms where this information 219 * is missing, the driver can define available ports on its own. 220 */ 221 int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1; 222 int nmask = (1 << nports) - 1; 223 224 ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask); 225 device_printf(dev, "Forcing PI to %d ports (mask = %x)\n", 226 nports, nmask); 227 } 228 229 ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); 230 231 /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ 232 if ((ctlr->quirks & AHCI_Q_ALTSIG) && 233 (ctlr->caps & AHCI_CAP_SPM) == 0) 234 ctlr->quirks |= AHCI_Q_NOBSYRES; 235 236 if (ctlr->quirks & AHCI_Q_1CH) { 237 ctlr->caps &= ~AHCI_CAP_NPMASK; 238 ctlr->ichannels &= 0x01; 239 } 240 if (ctlr->quirks & AHCI_Q_2CH) { 241 ctlr->caps &= ~AHCI_CAP_NPMASK; 242 ctlr->caps |= 1; 243 ctlr->ichannels &= 0x03; 244 } 245 if (ctlr->quirks & AHCI_Q_4CH) { 246 ctlr->caps &= ~AHCI_CAP_NPMASK; 247 ctlr->caps |= 3; 248 ctlr->ichannels &= 0x0f; 249 } 250 ctlr->channels = MAX(flsl(ctlr->ichannels), 251 (ctlr->caps & AHCI_CAP_NPMASK) + 1); 252 if (ctlr->quirks & AHCI_Q_NOPMP) 253 ctlr->caps &= ~AHCI_CAP_SPM; 254 if (ctlr->quirks & AHCI_Q_NONCQ) 255 ctlr->caps &= ~AHCI_CAP_SNCQ; 256 if ((ctlr->caps & AHCI_CAP_CCCS) == 0) 257 ctlr->ccc = 0; 258 ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); 259 260 /* Create controller-wide DMA tag. */ 261 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 262 (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR : 263 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 264 BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 265 ctlr->dma_coherent ? BUS_DMA_COHERENT : 0, NULL, NULL, 266 &ctlr->dma_tag)) { 267 ahci_free_mem(dev); 268 rman_fini(&ctlr->sc_iomem); 269 return (ENXIO); 270 } 271 272 ahci_ctlr_setup(dev); 273 274 /* Setup interrupts. 
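	 * ahci_setup_interrupt() allocates the MSI/INTx vectors, falls back
	 * to a single message if the HBA reverted to single-message mode
	 * (GHC.MRSM), and picks a dispatch mode per vector: ONE (the vector
	 * serves exactly one channel), AFTER (the last vector serves all
	 * remaining channels) or ALL (one shared vector serves every channel).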
*/ 275 if ((error = ahci_setup_interrupt(dev)) != 0) { 276 bus_dma_tag_destroy(ctlr->dma_tag); 277 ahci_free_mem(dev); 278 rman_fini(&ctlr->sc_iomem); 279 return (error); 280 } 281 282 i = 0; 283 for (u = ctlr->ichannels; u != 0; u >>= 1) 284 i += (u & 1); 285 ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); 286 resource_int_value(device_get_name(dev), device_get_unit(dev), 287 "direct", &ctlr->direct); 288 /* Announce HW capabilities. */ 289 speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; 290 device_printf(dev, 291 "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", 292 ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), 293 ((version >> 4) & 0xf0) + (version & 0x0f), 294 (ctlr->caps & AHCI_CAP_NPMASK) + 1, 295 ((speed == 1) ? "1.5":((speed == 2) ? "3": 296 ((speed == 3) ? "6":"?"))), 297 (ctlr->caps & AHCI_CAP_SPM) ? 298 "supported" : "not supported", 299 (ctlr->caps & AHCI_CAP_FBSS) ? 300 " with FBS" : ""); 301 if (ctlr->quirks != 0) { 302 device_printf(dev, "quirks=0x%b\n", ctlr->quirks, 303 AHCI_Q_BIT_STRING); 304 } 305 if (bootverbose) { 306 device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", 307 (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", 308 (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", 309 (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", 310 (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", 311 (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", 312 (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", 313 (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", 314 (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", 315 ((speed == 1) ? "1.5":((speed == 2) ? "3": 316 ((speed == 3) ? "6":"?")))); 317 printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", 318 (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", 319 (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", 320 (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", 321 (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", 322 (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", 323 (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", 324 ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, 325 (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", 326 (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", 327 (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"", 328 (ctlr->caps & AHCI_CAP_NPMASK) + 1); 329 } 330 if (bootverbose && version >= 0x00010200) { 331 device_printf(dev, "Caps2:%s%s%s%s%s%s\n", 332 (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"", 333 (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"", 334 (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"", 335 (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", 336 (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", 337 (ctlr->caps2 & AHCI_CAP2_BOH) ? 
" BOH":""); 338 } 339 /* Attach all channels on this controller */ 340 for (unit = 0; unit < ctlr->channels; unit++) { 341 child = device_add_child(dev, "ahcich", -1); 342 if (child == NULL) { 343 device_printf(dev, "failed to add channel device\n"); 344 continue; 345 } 346 device_set_ivars(child, (void *)(intptr_t)unit); 347 if ((ctlr->ichannels & (1 << unit)) == 0) 348 device_disable(child); 349 } 350 /* Attach any remapped NVME device */ 351 for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) { 352 child = device_add_child(dev, "nvme", -1); 353 if (child == NULL) { 354 device_printf(dev, "failed to add remapped NVMe device"); 355 continue; 356 } 357 device_set_ivars(child, (void *)(intptr_t)(unit | AHCI_REMAPPED_UNIT)); 358 } 359 360 if (ctlr->caps & AHCI_CAP_EMS) { 361 child = device_add_child(dev, "ahciem", -1); 362 if (child == NULL) 363 device_printf(dev, "failed to add enclosure device\n"); 364 else 365 device_set_ivars(child, (void *)(intptr_t)-1); 366 } 367 bus_generic_attach(dev); 368 return (0); 369 } 370 371 int 372 ahci_detach(device_t dev) 373 { 374 struct ahci_controller *ctlr = device_get_softc(dev); 375 int i; 376 377 /* Detach & delete all children */ 378 device_delete_children(dev); 379 380 /* Free interrupts. */ 381 for (i = 0; i < ctlr->numirqs; i++) { 382 if (ctlr->irqs[i].r_irq) { 383 bus_teardown_intr(dev, ctlr->irqs[i].r_irq, 384 ctlr->irqs[i].handle); 385 bus_release_resource(dev, SYS_RES_IRQ, 386 ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); 387 } 388 } 389 bus_dma_tag_destroy(ctlr->dma_tag); 390 /* Free memory. */ 391 rman_fini(&ctlr->sc_iomem); 392 ahci_free_mem(dev); 393 mtx_destroy(&ctlr->ch_mtx); 394 return (0); 395 } 396 397 void 398 ahci_free_mem(device_t dev) 399 { 400 struct ahci_controller *ctlr = device_get_softc(dev); 401 402 /* Release memory resources */ 403 if (ctlr->r_mem) 404 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); 405 if (ctlr->r_msix_table) 406 bus_release_resource(dev, SYS_RES_MEMORY, 407 ctlr->r_msix_tab_rid, ctlr->r_msix_table); 408 if (ctlr->r_msix_pba) 409 bus_release_resource(dev, SYS_RES_MEMORY, 410 ctlr->r_msix_pba_rid, ctlr->r_msix_pba); 411 412 ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL; 413 } 414 415 int 416 ahci_setup_interrupt(device_t dev) 417 { 418 struct ahci_controller *ctlr = device_get_softc(dev); 419 int i; 420 421 /* Check for single MSI vector fallback. */ 422 if (ctlr->numirqs > 1 && 423 (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { 424 device_printf(dev, "Falling back to one MSI\n"); 425 ctlr->numirqs = 1; 426 } 427 428 /* Ensure we don't overrun irqs. */ 429 if (ctlr->numirqs > AHCI_MAX_IRQS) { 430 device_printf(dev, "Too many irqs %d > %d (clamping)\n", 431 ctlr->numirqs, AHCI_MAX_IRQS); 432 ctlr->numirqs = AHCI_MAX_IRQS; 433 } 434 435 /* Allocate all IRQs. */ 436 for (i = 0; i < ctlr->numirqs; i++) { 437 ctlr->irqs[i].ctlr = ctlr; 438 ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 
1 : 0); 439 if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi) 440 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; 441 else if (ctlr->numirqs == 1 || i >= ctlr->channels || 442 (ctlr->ccc && i == ctlr->cccv)) 443 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; 444 else if (ctlr->channels > ctlr->numirqs && 445 i == ctlr->numirqs - 1) 446 ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; 447 else 448 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; 449 if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 450 &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { 451 device_printf(dev, "unable to map interrupt\n"); 452 return (ENXIO); 453 } 454 if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, 455 (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : 456 ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge : 457 ahci_intr_one), 458 &ctlr->irqs[i], &ctlr->irqs[i].handle))) { 459 /* SOS XXX release r_irq */ 460 device_printf(dev, "unable to setup interrupt\n"); 461 return (ENXIO); 462 } 463 if (ctlr->numirqs > 1) { 464 bus_describe_intr(dev, ctlr->irqs[i].r_irq, 465 ctlr->irqs[i].handle, 466 ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? 467 "ch%d" : "%d", i); 468 } 469 } 470 return (0); 471 } 472 473 /* 474 * Common case interrupt handler. 475 */ 476 static void 477 ahci_intr(void *data) 478 { 479 struct ahci_controller_irq *irq = data; 480 struct ahci_controller *ctlr = irq->ctlr; 481 u_int32_t is, ise = 0; 482 void *arg; 483 int unit; 484 485 if (irq->mode == AHCI_IRQ_MODE_ALL) { 486 unit = 0; 487 if (ctlr->ccc) 488 is = ctlr->ichannels; 489 else 490 is = ATA_INL(ctlr->r_mem, AHCI_IS); 491 } else { /* AHCI_IRQ_MODE_AFTER */ 492 unit = irq->r_irq_rid - 1; 493 is = ATA_INL(ctlr->r_mem, AHCI_IS); 494 is &= (0xffffffff << unit); 495 } 496 /* CCC interrupt is edge triggered. */ 497 if (ctlr->ccc) 498 ise = 1 << ctlr->cccv; 499 /* Some controllers have edge triggered IS. */ 500 if (ctlr->quirks & AHCI_Q_EDGEIS) 501 ise |= is; 502 if (ise != 0) 503 ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); 504 for (; unit < ctlr->channels; unit++) { 505 if ((is & (1 << unit)) != 0 && 506 (arg = ctlr->interrupt[unit].argument)) { 507 ctlr->interrupt[unit].function(arg); 508 } 509 } 510 for (; unit < ctlr->channels + ctlr->remapped_devices; unit++) { 511 if ((arg = ctlr->interrupt[unit].argument)) { 512 ctlr->interrupt[unit].function(arg); 513 } 514 } 515 516 /* AHCI declares level triggered IS. */ 517 if (!(ctlr->quirks & AHCI_Q_EDGEIS)) 518 ATA_OUTL(ctlr->r_mem, AHCI_IS, is); 519 ATA_RBL(ctlr->r_mem, AHCI_IS); 520 } 521 522 /* 523 * Simplified interrupt handler for multivector MSI mode. 524 */ 525 static void 526 ahci_intr_one(void *data) 527 { 528 struct ahci_controller_irq *irq = data; 529 struct ahci_controller *ctlr = irq->ctlr; 530 void *arg; 531 int unit; 532 533 unit = irq->r_irq_rid - 1; 534 if ((arg = ctlr->interrupt[unit].argument)) 535 ctlr->interrupt[unit].function(arg); 536 /* AHCI declares level triggered IS. */ 537 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); 538 ATA_RBL(ctlr->r_mem, AHCI_IS); 539 } 540 541 static void 542 ahci_intr_one_edge(void *data) 543 { 544 struct ahci_controller_irq *irq = data; 545 struct ahci_controller *ctlr = irq->ctlr; 546 void *arg; 547 int unit; 548 549 unit = irq->r_irq_rid - 1; 550 /* Some controllers have edge triggered IS. 
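	 * Clear the channel bit in IS before running the handler so that an
	 * event arriving while the handler runs produces a new edge and is
	 * not lost; the level-triggered variant above clears it afterwards.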
*/ 551 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); 552 if ((arg = ctlr->interrupt[unit].argument)) 553 ctlr->interrupt[unit].function(arg); 554 ATA_RBL(ctlr->r_mem, AHCI_IS); 555 } 556 557 struct resource * 558 ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, 559 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 560 { 561 struct ahci_controller *ctlr = device_get_softc(dev); 562 struct resource *res; 563 rman_res_t st; 564 int offset, size, unit; 565 bool is_remapped; 566 567 unit = (intptr_t)device_get_ivars(child); 568 if (unit & AHCI_REMAPPED_UNIT) { 569 unit &= ~AHCI_REMAPPED_UNIT; 570 unit -= ctlr->channels; 571 is_remapped = true; 572 } else 573 is_remapped = false; 574 res = NULL; 575 switch (type) { 576 case SYS_RES_MEMORY: 577 if (is_remapped) { 578 offset = ctlr->remap_offset + unit * ctlr->remap_size; 579 size = ctlr->remap_size; 580 } 581 else if (unit >= 0) { 582 offset = AHCI_OFFSET + (unit << 7); 583 size = 128; 584 } else if (*rid == 0) { 585 offset = AHCI_EM_CTL; 586 size = 4; 587 } else { 588 offset = (ctlr->emloc & 0xffff0000) >> 14; 589 size = (ctlr->emloc & 0x0000ffff) << 2; 590 if (*rid != 1) { 591 if (*rid == 2 && (ctlr->capsem & 592 (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) 593 offset += size; 594 else 595 break; 596 } 597 } 598 st = rman_get_start(ctlr->r_mem); 599 res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, 600 st + offset + size - 1, size, RF_ACTIVE, child); 601 if (res) { 602 bus_space_handle_t bsh; 603 bus_space_tag_t bst; 604 bsh = rman_get_bushandle(ctlr->r_mem); 605 bst = rman_get_bustag(ctlr->r_mem); 606 bus_space_subregion(bst, bsh, offset, 128, &bsh); 607 rman_set_bushandle(res, bsh); 608 rman_set_bustag(res, bst); 609 } 610 break; 611 case SYS_RES_IRQ: 612 if (*rid == ATA_IRQ_RID) 613 res = ctlr->irqs[0].r_irq; 614 break; 615 } 616 return (res); 617 } 618 619 int 620 ahci_release_resource(device_t dev, device_t child, int type, int rid, 621 struct resource *r) 622 { 623 624 switch (type) { 625 case SYS_RES_MEMORY: 626 rman_release_resource(r); 627 return (0); 628 case SYS_RES_IRQ: 629 if (rid != ATA_IRQ_RID) 630 return (ENOENT); 631 return (0); 632 } 633 return (EINVAL); 634 } 635 636 int 637 ahci_setup_intr(device_t dev, device_t child, struct resource *irq, 638 int flags, driver_filter_t *filter, driver_intr_t *function, 639 void *argument, void **cookiep) 640 { 641 struct ahci_controller *ctlr = device_get_softc(dev); 642 int unit = (intptr_t)device_get_ivars(child) & ~AHCI_REMAPPED_UNIT; 643 644 if (filter != NULL) { 645 printf("ahci.c: we cannot use a filter here\n"); 646 return (EINVAL); 647 } 648 ctlr->interrupt[unit].function = function; 649 ctlr->interrupt[unit].argument = argument; 650 return (0); 651 } 652 653 int 654 ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, 655 void *cookie) 656 { 657 struct ahci_controller *ctlr = device_get_softc(dev); 658 int unit = (intptr_t)device_get_ivars(child) & ~AHCI_REMAPPED_UNIT; 659 660 ctlr->interrupt[unit].function = NULL; 661 ctlr->interrupt[unit].argument = NULL; 662 return (0); 663 } 664 665 int 666 ahci_print_child(device_t dev, device_t child) 667 { 668 int retval, channel; 669 670 retval = bus_print_child_header(dev, child); 671 channel = (int)(intptr_t)device_get_ivars(child) & ~AHCI_REMAPPED_UNIT; 672 if (channel >= 0) 673 retval += printf(" at channel %d", channel); 674 retval += bus_print_child_footer(dev, child); 675 return (retval); 676 } 677 678 int 679 ahci_child_location_str(device_t dev, device_t child, char *buf, 680 size_t 
buflen) 681 { 682 int channel; 683 684 channel = (int)(intptr_t)device_get_ivars(child) & ~AHCI_REMAPPED_UNIT; 685 if (channel >= 0) 686 snprintf(buf, buflen, "channel=%d", channel); 687 return (0); 688 } 689 690 bus_dma_tag_t 691 ahci_get_dma_tag(device_t dev, device_t child) 692 { 693 struct ahci_controller *ctlr = device_get_softc(dev); 694 695 return (ctlr->dma_tag); 696 } 697 698 void 699 ahci_attached(device_t dev, struct ahci_channel *ch) 700 { 701 struct ahci_controller *ctlr = device_get_softc(dev); 702 703 mtx_lock(&ctlr->ch_mtx); 704 ctlr->ch[ch->unit] = ch; 705 mtx_unlock(&ctlr->ch_mtx); 706 } 707 708 void 709 ahci_detached(device_t dev, struct ahci_channel *ch) 710 { 711 struct ahci_controller *ctlr = device_get_softc(dev); 712 713 mtx_lock(&ctlr->ch_mtx); 714 mtx_lock(&ch->mtx); 715 ctlr->ch[ch->unit] = NULL; 716 mtx_unlock(&ch->mtx); 717 mtx_unlock(&ctlr->ch_mtx); 718 } 719 720 struct ahci_channel * 721 ahci_getch(device_t dev, int n) 722 { 723 struct ahci_controller *ctlr = device_get_softc(dev); 724 struct ahci_channel *ch; 725 726 KASSERT(n >= 0 && n < AHCI_MAX_PORTS, ("Bad channel number %d", n)); 727 mtx_lock(&ctlr->ch_mtx); 728 ch = ctlr->ch[n]; 729 if (ch != NULL) 730 mtx_lock(&ch->mtx); 731 mtx_unlock(&ctlr->ch_mtx); 732 return (ch); 733 } 734 735 void 736 ahci_putch(struct ahci_channel *ch) 737 { 738 739 mtx_unlock(&ch->mtx); 740 } 741 742 static int 743 ahci_ch_probe(device_t dev) 744 { 745 746 device_set_desc_copy(dev, "AHCI channel"); 747 return (BUS_PROBE_DEFAULT); 748 } 749 750 static int 751 ahci_ch_disablephy_proc(SYSCTL_HANDLER_ARGS) 752 { 753 struct ahci_channel *ch; 754 int error, value; 755 756 ch = arg1; 757 value = ch->disablephy; 758 error = sysctl_handle_int(oidp, &value, 0, req); 759 if (error != 0 || req->newptr == NULL || (value != 0 && value != 1)) 760 return (error); 761 762 mtx_lock(&ch->mtx); 763 ch->disablephy = value; 764 if (value) { 765 ahci_ch_deinit(ch->dev); 766 } else { 767 ahci_ch_init(ch->dev); 768 ahci_phy_check_events(ch, ATA_SE_PHY_CHANGED | ATA_SE_EXCHANGED); 769 } 770 mtx_unlock(&ch->mtx); 771 772 return (0); 773 } 774 775 static int 776 ahci_ch_attach(device_t dev) 777 { 778 struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); 779 struct ahci_channel *ch = device_get_softc(dev); 780 struct cam_devq *devq; 781 struct sysctl_ctx_list *ctx; 782 struct sysctl_oid *tree; 783 int rid, error, i, sata_rev = 0; 784 u_int32_t version; 785 786 ch->dev = dev; 787 ch->unit = (intptr_t)device_get_ivars(dev); 788 ch->caps = ctlr->caps; 789 ch->caps2 = ctlr->caps2; 790 ch->start = ctlr->ch_start; 791 ch->quirks = ctlr->quirks; 792 ch->vendorid = ctlr->vendorid; 793 ch->deviceid = ctlr->deviceid; 794 ch->subvendorid = ctlr->subvendorid; 795 ch->subdeviceid = ctlr->subdeviceid; 796 ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; 797 mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); 798 ch->pm_level = 0; 799 resource_int_value(device_get_name(dev), 800 device_get_unit(dev), "pm_level", &ch->pm_level); 801 STAILQ_INIT(&ch->doneq); 802 if (ch->pm_level > 3) 803 callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); 804 callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); 805 /* JMicron external ports (0) sometimes limited */ 806 if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0) 807 sata_rev = 1; 808 if (ch->quirks & AHCI_Q_SATA2) 809 sata_rev = 2; 810 resource_int_value(device_get_name(dev), 811 device_get_unit(dev), "sata_rev", &sata_rev); 812 for (i = 0; i < 16; i++) { 813 ch->user[i].revision = sata_rev; 
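		/*
		 * Remaining per-target defaults: an 8192-byte bytecount,
		 * the full slot count as tag depth, and power-management
		 * capabilities advertised only when pm_level is nonzero.
		 */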
814 ch->user[i].mode = 0; 815 ch->user[i].bytecount = 8192; 816 ch->user[i].tags = ch->numslots; 817 ch->user[i].caps = 0; 818 ch->curr[i] = ch->user[i]; 819 if (ch->pm_level) { 820 ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | 821 CTS_SATA_CAPS_H_APST | 822 CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; 823 } 824 ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | 825 CTS_SATA_CAPS_H_AN; 826 } 827 rid = 0; 828 if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 829 &rid, RF_ACTIVE))) 830 return (ENXIO); 831 ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); 832 version = ATA_INL(ctlr->r_mem, AHCI_VS); 833 if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) 834 ch->chcaps |= AHCI_P_CMD_FBSCP; 835 if (ch->caps2 & AHCI_CAP2_SDS) 836 ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP); 837 if (bootverbose) { 838 device_printf(dev, "Caps:%s%s%s%s%s%s\n", 839 (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"", 840 (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"", 841 (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", 842 (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", 843 (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"", 844 (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":""); 845 } 846 ahci_dmainit(dev); 847 ahci_slotsalloc(dev); 848 mtx_lock(&ch->mtx); 849 ahci_ch_init(dev); 850 rid = ATA_IRQ_RID; 851 if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 852 &rid, RF_SHAREABLE | RF_ACTIVE))) { 853 device_printf(dev, "Unable to map interrupt\n"); 854 error = ENXIO; 855 goto err0; 856 } 857 if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, 858 ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr, 859 ch, &ch->ih))) { 860 device_printf(dev, "Unable to setup interrupt\n"); 861 error = ENXIO; 862 goto err1; 863 } 864 /* Create the device queue for our SIM. */ 865 devq = cam_simq_alloc(ch->numslots); 866 if (devq == NULL) { 867 device_printf(dev, "Unable to allocate simq\n"); 868 error = ENOMEM; 869 goto err1; 870 } 871 /* Construct SIM entry */ 872 ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, 873 device_get_unit(dev), (struct mtx *)&ch->mtx, 874 (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots), 875 (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0, 876 devq); 877 if (ch->sim == NULL) { 878 cam_simq_free(devq); 879 device_printf(dev, "unable to allocate sim\n"); 880 error = ENOMEM; 881 goto err1; 882 } 883 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { 884 device_printf(dev, "unable to register xpt bus\n"); 885 error = ENXIO; 886 goto err2; 887 } 888 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), 889 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 890 device_printf(dev, "unable to create path\n"); 891 error = ENXIO; 892 goto err3; 893 } 894 if (ch->pm_level > 3) { 895 callout_reset(&ch->pm_timer, 896 (ch->pm_level == 4) ? 
hz / 1000 : hz / 8, 897 ahci_ch_pm, ch); 898 } 899 mtx_unlock(&ch->mtx); 900 ahci_attached(device_get_parent(dev), ch); 901 ctx = device_get_sysctl_ctx(dev); 902 tree = device_get_sysctl_tree(dev); 903 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "disable_phy", 904 CTLFLAG_RW | CTLTYPE_UINT, ch, 0, ahci_ch_disablephy_proc, "IU", 905 "Disable PHY"); 906 return (0); 907 908 err3: 909 xpt_bus_deregister(cam_sim_path(ch->sim)); 910 err2: 911 cam_sim_free(ch->sim, /*free_devq*/TRUE); 912 err1: 913 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 914 err0: 915 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); 916 mtx_unlock(&ch->mtx); 917 mtx_destroy(&ch->mtx); 918 return (error); 919 } 920 921 static int 922 ahci_ch_detach(device_t dev) 923 { 924 struct ahci_channel *ch = device_get_softc(dev); 925 926 ahci_detached(device_get_parent(dev), ch); 927 mtx_lock(&ch->mtx); 928 xpt_async(AC_LOST_DEVICE, ch->path, NULL); 929 /* Forget about reset. */ 930 if (ch->resetting) { 931 ch->resetting = 0; 932 xpt_release_simq(ch->sim, TRUE); 933 } 934 xpt_free_path(ch->path); 935 xpt_bus_deregister(cam_sim_path(ch->sim)); 936 cam_sim_free(ch->sim, /*free_devq*/TRUE); 937 mtx_unlock(&ch->mtx); 938 939 if (ch->pm_level > 3) 940 callout_drain(&ch->pm_timer); 941 callout_drain(&ch->reset_timer); 942 bus_teardown_intr(dev, ch->r_irq, ch->ih); 943 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 944 945 ahci_ch_deinit(dev); 946 ahci_slotsfree(dev); 947 ahci_dmafini(dev); 948 949 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); 950 mtx_destroy(&ch->mtx); 951 return (0); 952 } 953 954 static int 955 ahci_ch_init(device_t dev) 956 { 957 struct ahci_channel *ch = device_get_softc(dev); 958 uint64_t work; 959 960 /* Disable port interrupts */ 961 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 962 /* Setup work areas */ 963 work = ch->dma.work_bus + AHCI_CL_OFFSET; 964 ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); 965 ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); 966 work = ch->dma.rfis_bus; 967 ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); 968 ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); 969 /* Activate the channel and power/spin up device */ 970 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 971 (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | 972 ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | 973 ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); 974 ahci_start_fr(ch); 975 ahci_start(ch, 1); 976 return (0); 977 } 978 979 static int 980 ahci_ch_deinit(device_t dev) 981 { 982 struct ahci_channel *ch = device_get_softc(dev); 983 984 /* Disable port interrupts. */ 985 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 986 /* Reset command register. */ 987 ahci_stop(ch); 988 ahci_stop_fr(ch); 989 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); 990 /* Allow everything, including partial and slumber modes. */ 991 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); 992 /* Request slumber mode transition and give some time to get there. */ 993 ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); 994 DELAY(100); 995 /* Disable PHY. */ 996 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); 997 return (0); 998 } 999 1000 static int 1001 ahci_ch_suspend(device_t dev) 1002 { 1003 struct ahci_channel *ch = device_get_softc(dev); 1004 1005 mtx_lock(&ch->mtx); 1006 xpt_freeze_simq(ch->sim, 1); 1007 /* Forget about reset. 
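	 * A reset that is still in progress is simply dropped here: its
	 * callout is stopped and the SIM queue released, since the channel
	 * is re-initialized and reset again on resume.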
*/ 1008 if (ch->resetting) { 1009 ch->resetting = 0; 1010 callout_stop(&ch->reset_timer); 1011 xpt_release_simq(ch->sim, TRUE); 1012 } 1013 while (ch->oslots) 1014 msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); 1015 ahci_ch_deinit(dev); 1016 mtx_unlock(&ch->mtx); 1017 return (0); 1018 } 1019 1020 static int 1021 ahci_ch_resume(device_t dev) 1022 { 1023 struct ahci_channel *ch = device_get_softc(dev); 1024 1025 mtx_lock(&ch->mtx); 1026 ahci_ch_init(dev); 1027 ahci_reset(ch); 1028 xpt_release_simq(ch->sim, TRUE); 1029 mtx_unlock(&ch->mtx); 1030 return (0); 1031 } 1032 1033 devclass_t ahcich_devclass; 1034 static device_method_t ahcich_methods[] = { 1035 DEVMETHOD(device_probe, ahci_ch_probe), 1036 DEVMETHOD(device_attach, ahci_ch_attach), 1037 DEVMETHOD(device_detach, ahci_ch_detach), 1038 DEVMETHOD(device_suspend, ahci_ch_suspend), 1039 DEVMETHOD(device_resume, ahci_ch_resume), 1040 DEVMETHOD_END 1041 }; 1042 static driver_t ahcich_driver = { 1043 "ahcich", 1044 ahcich_methods, 1045 sizeof(struct ahci_channel) 1046 }; 1047 DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL); 1048 1049 struct ahci_dc_cb_args { 1050 bus_addr_t maddr; 1051 int error; 1052 }; 1053 1054 static void 1055 ahci_dmainit(device_t dev) 1056 { 1057 struct ahci_channel *ch = device_get_softc(dev); 1058 struct ahci_dc_cb_args dcba; 1059 size_t rfsize; 1060 int error; 1061 1062 /* Command area. */ 1063 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, 1064 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1065 NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 1066 0, NULL, NULL, &ch->dma.work_tag); 1067 if (error != 0) 1068 goto error; 1069 error = bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 1070 BUS_DMA_ZERO, &ch->dma.work_map); 1071 if (error != 0) 1072 goto error; 1073 error = bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, 1074 AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT); 1075 if (error != 0 || (error = dcba.error) != 0) { 1076 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); 1077 goto error; 1078 } 1079 ch->dma.work_bus = dcba.maddr; 1080 /* FIS receive area. */ 1081 if (ch->chcaps & AHCI_P_CMD_FBSCP) 1082 rfsize = 4096; 1083 else 1084 rfsize = 256; 1085 error = bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, 1086 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1087 NULL, NULL, rfsize, 1, rfsize, 1088 0, NULL, NULL, &ch->dma.rfis_tag); 1089 if (error != 0) 1090 goto error; 1091 error = bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, 1092 &ch->dma.rfis_map); 1093 if (error != 0) 1094 goto error; 1095 error = bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, 1096 rfsize, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT); 1097 if (error != 0 || (error = dcba.error) != 0) { 1098 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); 1099 goto error; 1100 } 1101 ch->dma.rfis_bus = dcba.maddr; 1102 /* Data area. 
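	 * This tag describes the actual I/O buffers: up to AHCI_SG_ENTRIES
	 * scatter/gather segments per command, each at most AHCI_PRD_MAX
	 * bytes long and 2-byte aligned, as the PRD entry format requires.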
*/ 1103 error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, 1104 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1105 NULL, NULL, 1106 AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots, 1107 AHCI_SG_ENTRIES, AHCI_PRD_MAX, 1108 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag); 1109 if (error != 0) 1110 goto error; 1111 return; 1112 1113 error: 1114 device_printf(dev, "WARNING - DMA initialization failed, error %d\n", 1115 error); 1116 ahci_dmafini(dev); 1117 } 1118 1119 static void 1120 ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 1121 { 1122 struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; 1123 1124 if (!(dcba->error = error)) 1125 dcba->maddr = segs[0].ds_addr; 1126 } 1127 1128 static void 1129 ahci_dmafini(device_t dev) 1130 { 1131 struct ahci_channel *ch = device_get_softc(dev); 1132 1133 if (ch->dma.data_tag) { 1134 bus_dma_tag_destroy(ch->dma.data_tag); 1135 ch->dma.data_tag = NULL; 1136 } 1137 if (ch->dma.rfis_bus) { 1138 bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); 1139 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); 1140 ch->dma.rfis_bus = 0; 1141 ch->dma.rfis = NULL; 1142 } 1143 if (ch->dma.work_bus) { 1144 bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); 1145 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); 1146 ch->dma.work_bus = 0; 1147 ch->dma.work = NULL; 1148 } 1149 if (ch->dma.work_tag) { 1150 bus_dma_tag_destroy(ch->dma.work_tag); 1151 ch->dma.work_tag = NULL; 1152 } 1153 } 1154 1155 static void 1156 ahci_slotsalloc(device_t dev) 1157 { 1158 struct ahci_channel *ch = device_get_softc(dev); 1159 int i; 1160 1161 /* Alloc and setup command/dma slots */ 1162 bzero(ch->slot, sizeof(ch->slot)); 1163 for (i = 0; i < ch->numslots; i++) { 1164 struct ahci_slot *slot = &ch->slot[i]; 1165 1166 slot->ch = ch; 1167 slot->slot = i; 1168 slot->state = AHCI_SLOT_EMPTY; 1169 slot->ccb = NULL; 1170 callout_init_mtx(&slot->timeout, &ch->mtx, 0); 1171 1172 if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) 1173 device_printf(ch->dev, "FAILURE - create data_map\n"); 1174 } 1175 } 1176 1177 static void 1178 ahci_slotsfree(device_t dev) 1179 { 1180 struct ahci_channel *ch = device_get_softc(dev); 1181 int i; 1182 1183 /* Free all dma slots */ 1184 for (i = 0; i < ch->numslots; i++) { 1185 struct ahci_slot *slot = &ch->slot[i]; 1186 1187 callout_drain(&slot->timeout); 1188 if (slot->dma.data_map) { 1189 bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); 1190 slot->dma.data_map = NULL; 1191 } 1192 } 1193 } 1194 1195 static int 1196 ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr) 1197 { 1198 1199 if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || 1200 ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { 1201 u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); 1202 union ccb *ccb; 1203 1204 if (bootverbose) { 1205 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) 1206 device_printf(ch->dev, "CONNECT requested\n"); 1207 else 1208 device_printf(ch->dev, "DISCONNECT requested\n"); 1209 } 1210 ahci_reset(ch); 1211 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 1212 return (0); 1213 if (xpt_create_path(&ccb->ccb_h.path, NULL, 1214 cam_sim_path(ch->sim), 1215 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1216 xpt_free_ccb(ccb); 1217 return (0); 1218 } 1219 xpt_rescan(ccb); 1220 return (1); 1221 } 1222 return (0); 1223 } 1224 1225 static void 1226 ahci_cpd_check_events(struct ahci_channel *ch) 1227 { 1228 u_int32_t status; 1229 union ccb *ccb; 
1230 device_t dev; 1231 1232 if (ch->pm_level == 0) 1233 return; 1234 1235 status = ATA_INL(ch->r_mem, AHCI_P_CMD); 1236 if ((status & AHCI_P_CMD_CPD) == 0) 1237 return; 1238 1239 if (bootverbose) { 1240 dev = ch->dev; 1241 if (status & AHCI_P_CMD_CPS) { 1242 device_printf(dev, "COLD CONNECT requested\n"); 1243 } else 1244 device_printf(dev, "COLD DISCONNECT requested\n"); 1245 } 1246 ahci_reset(ch); 1247 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 1248 return; 1249 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), 1250 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1251 xpt_free_ccb(ccb); 1252 return; 1253 } 1254 xpt_rescan(ccb); 1255 } 1256 1257 static void 1258 ahci_notify_events(struct ahci_channel *ch, u_int32_t status) 1259 { 1260 struct cam_path *dpath; 1261 int i; 1262 1263 if (ch->caps & AHCI_CAP_SSNTF) 1264 ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); 1265 if (bootverbose) 1266 device_printf(ch->dev, "SNTF 0x%04x\n", status); 1267 for (i = 0; i < 16; i++) { 1268 if ((status & (1 << i)) == 0) 1269 continue; 1270 if (xpt_create_path(&dpath, NULL, 1271 xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { 1272 xpt_async(AC_SCSI_AEN, dpath, NULL); 1273 xpt_free_path(dpath); 1274 } 1275 } 1276 } 1277 1278 static void 1279 ahci_done(struct ahci_channel *ch, union ccb *ccb) 1280 { 1281 1282 mtx_assert(&ch->mtx, MA_OWNED); 1283 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || 1284 ch->batch == 0) { 1285 xpt_done(ccb); 1286 return; 1287 } 1288 1289 STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); 1290 } 1291 1292 static void 1293 ahci_ch_intr(void *arg) 1294 { 1295 struct ahci_channel *ch = (struct ahci_channel *)arg; 1296 uint32_t istatus; 1297 1298 /* Read interrupt statuses. */ 1299 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 1300 1301 mtx_lock(&ch->mtx); 1302 ahci_ch_intr_main(ch, istatus); 1303 mtx_unlock(&ch->mtx); 1304 } 1305 1306 static void 1307 ahci_ch_intr_direct(void *arg) 1308 { 1309 struct ahci_channel *ch = (struct ahci_channel *)arg; 1310 struct ccb_hdr *ccb_h; 1311 uint32_t istatus; 1312 STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq); 1313 1314 /* Read interrupt statuses. */ 1315 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 1316 1317 mtx_lock(&ch->mtx); 1318 ch->batch = 1; 1319 ahci_ch_intr_main(ch, istatus); 1320 ch->batch = 0; 1321 /* 1322 * Prevent the possibility of issues caused by processing the queue 1323 * while unlocked below by moving the contents to a local queue. 1324 */ 1325 STAILQ_CONCAT(&tmp_doneq, &ch->doneq); 1326 mtx_unlock(&ch->mtx); 1327 while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) { 1328 STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe); 1329 xpt_done_direct((union ccb *)ccb_h); 1330 } 1331 } 1332 1333 static void 1334 ahci_ch_pm(void *arg) 1335 { 1336 struct ahci_channel *ch = (struct ahci_channel *)arg; 1337 uint32_t work; 1338 1339 if (ch->numrslots != 0) 1340 return; 1341 work = ATA_INL(ch->r_mem, AHCI_P_CMD); 1342 if (ch->pm_level == 4) 1343 work |= AHCI_P_CMD_PARTIAL; 1344 else 1345 work |= AHCI_P_CMD_SLUMBER; 1346 ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); 1347 } 1348 1349 static void 1350 ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) 1351 { 1352 uint32_t cstatus, serr = 0, sntf = 0, ok, err; 1353 enum ahci_err_type et; 1354 int i, ccs, port, reset = 0; 1355 1356 /* Clear interrupt statuses. */ 1357 ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); 1358 /* Read command statuses. 
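	 * NCQ commands are tracked through PxSACT and the rest through PxCI;
	 * when both kinds are outstanding the two registers are merged below.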
	 */
	if (ch->numtslots != 0)
		cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
	else
		cstatus = 0;
	if (ch->numrslots != ch->numtslots)
		cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI);
	/* Read SNTF in one of possible ways. */
	if ((istatus & AHCI_P_IX_SDB) &&
	    (ch->pm_present || ch->curr[0].atapi != 0)) {
		if (ch->caps & AHCI_CAP_SSNTF)
			sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF);
		else if (ch->fbs_enabled) {
			u_int8_t *fis = ch->dma.rfis + 0x58;

			for (i = 0; i < 16; i++) {
				if (fis[1] & 0x80) {
					fis[1] &= 0x7f;
					sntf |= 1 << i;
				}
				fis += 256;
			}
		} else {
			u_int8_t *fis = ch->dma.rfis + 0x58;

			if (fis[1] & 0x80)
				sntf = (1 << (fis[1] & 0x0f));
		}
	}
	/* Process PHY events */
	if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF |
	    AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
		serr = ATA_INL(ch->r_mem, AHCI_P_SERR);
		if (serr) {
			ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr);
			reset = ahci_phy_check_events(ch, serr);
		}
	}
	/* Process cold presence detection events */
	if ((istatus & AHCI_P_IX_CPD) && !reset)
		ahci_cpd_check_events(ch);
	/* Process command errors */
	if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF |
	    AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) {
		if (ch->quirks & AHCI_Q_NOCCS) {
			/*
			 * ASMedia chips sometimes report failed commands as
			 * completed. Count all running commands as failed.
			 */
			cstatus |= ch->rslots;

			/* They also report wrong CCS, so try to guess one. */
			ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1;
		} else {
			ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) &
			    AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT;
		}
//device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n",
//    __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD),
//    serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs);
		port = -1;
		if (ch->fbs_enabled) {
			uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS);
			if (fbs & AHCI_P_FBS_SDE) {
				port = (fbs & AHCI_P_FBS_DWE)
				    >> AHCI_P_FBS_DWE_SHIFT;
			} else {
				for (i = 0; i < 16; i++) {
					if (ch->numrslotspd[i] == 0)
						continue;
					if (port == -1)
						port = i;
					else if (port != i) {
						port = -2;
						break;
					}
				}
			}
		}
		err = ch->rslots & cstatus;
	} else {
		ccs = 0;
		err = 0;
		port = -1;
	}
	/* Complete all successful commands. */
	ok = ch->rslots & ~cstatus;
	for (i = 0; i < ch->numslots; i++) {
		if ((ok >> i) & 1)
			ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE);
	}
	/* On error, complete the rest of commands with error statuses. */
	if (err) {
		if (ch->frozen) {
			union ccb *fccb = ch->frozen;
			ch->frozen = NULL;
			fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
			if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
				xpt_freeze_devq(fccb->ccb_h.path, 1);
				fccb->ccb_h.status |= CAM_DEV_QFRZN;
			}
			ahci_done(ch, fccb);
		}
		for (i = 0; i < ch->numslots; i++) {
			/* XXX: requests in loading state.
*/ 1463 if (((err >> i) & 1) == 0) 1464 continue; 1465 if (port >= 0 && 1466 ch->slot[i].ccb->ccb_h.target_id != port) 1467 continue; 1468 if (istatus & AHCI_P_IX_TFE) { 1469 if (port != -2) { 1470 /* Task File Error */ 1471 if (ch->numtslotspd[ 1472 ch->slot[i].ccb->ccb_h.target_id] == 0) { 1473 /* Untagged operation. */ 1474 if (i == ccs) 1475 et = AHCI_ERR_TFE; 1476 else 1477 et = AHCI_ERR_INNOCENT; 1478 } else { 1479 /* Tagged operation. */ 1480 et = AHCI_ERR_NCQ; 1481 } 1482 } else { 1483 et = AHCI_ERR_TFE; 1484 ch->fatalerr = 1; 1485 } 1486 } else if (istatus & AHCI_P_IX_IF) { 1487 if (ch->numtslots == 0 && i != ccs && port != -2) 1488 et = AHCI_ERR_INNOCENT; 1489 else 1490 et = AHCI_ERR_SATA; 1491 } else 1492 et = AHCI_ERR_INVALID; 1493 ahci_end_transaction(&ch->slot[i], et); 1494 } 1495 /* 1496 * We can't reinit port if there are some other 1497 * commands active, use resume to complete them. 1498 */ 1499 if (ch->rslots != 0 && !ch->recoverycmd) 1500 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); 1501 } 1502 /* Process NOTIFY events */ 1503 if (sntf) 1504 ahci_notify_events(ch, sntf); 1505 } 1506 1507 /* Must be called with channel locked. */ 1508 static int 1509 ahci_check_collision(struct ahci_channel *ch, union ccb *ccb) 1510 { 1511 int t = ccb->ccb_h.target_id; 1512 1513 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1514 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1515 /* Tagged command while we have no supported tag free. */ 1516 if (((~ch->oslots) & (0xffffffff >> (32 - 1517 ch->curr[t].tags))) == 0) 1518 return (1); 1519 /* If we have FBS */ 1520 if (ch->fbs_enabled) { 1521 /* Tagged command while untagged are active. */ 1522 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) 1523 return (1); 1524 } else { 1525 /* Tagged command while untagged are active. */ 1526 if (ch->numrslots != 0 && ch->numtslots == 0) 1527 return (1); 1528 /* Tagged command while tagged to other target is active. */ 1529 if (ch->numtslots != 0 && 1530 ch->taggedtarget != ccb->ccb_h.target_id) 1531 return (1); 1532 } 1533 } else { 1534 /* If we have FBS */ 1535 if (ch->fbs_enabled) { 1536 /* Untagged command while tagged are active. */ 1537 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) 1538 return (1); 1539 } else { 1540 /* Untagged command while tagged are active. */ 1541 if (ch->numrslots != 0 && ch->numtslots != 0) 1542 return (1); 1543 } 1544 } 1545 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1546 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { 1547 /* Atomic command while anything active. */ 1548 if (ch->numrslots != 0) 1549 return (1); 1550 } 1551 /* We have some atomic command running. */ 1552 if (ch->aslots != 0) 1553 return (1); 1554 return (0); 1555 } 1556 1557 /* Must be called with channel locked. */ 1558 static void 1559 ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb) 1560 { 1561 struct ahci_slot *slot; 1562 int tag, tags; 1563 1564 /* Choose empty slot. */ 1565 tags = ch->numslots; 1566 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1567 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) 1568 tags = ch->curr[ccb->ccb_h.target_id].tags; 1569 if (ch->lastslot + 1 < tags) 1570 tag = ffs(~(ch->oslots >> (ch->lastslot + 1))); 1571 else 1572 tag = 0; 1573 if (tag == 0 || tag + ch->lastslot >= tags) 1574 tag = ffs(~ch->oslots) - 1; 1575 else 1576 tag += ch->lastslot; 1577 ch->lastslot = tag; 1578 /* Occupy chosen slot. */ 1579 slot = &ch->slot[tag]; 1580 slot->ccb = ccb; 1581 /* Stop PM timer. 
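	 * While commands are outstanding the link has to stay active, so the
	 * callout that would put it into PARTIAL/SLUMBER (ahci_ch_pm()) is
	 * stopped before the first command is issued.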
*/ 1582 if (ch->numrslots == 0 && ch->pm_level > 3) 1583 callout_stop(&ch->pm_timer); 1584 /* Update channel stats. */ 1585 ch->oslots |= (1 << tag); 1586 ch->numrslots++; 1587 ch->numrslotspd[ccb->ccb_h.target_id]++; 1588 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1589 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1590 ch->numtslots++; 1591 ch->numtslotspd[ccb->ccb_h.target_id]++; 1592 ch->taggedtarget = ccb->ccb_h.target_id; 1593 } 1594 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1595 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) 1596 ch->aslots |= (1 << tag); 1597 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1598 slot->state = AHCI_SLOT_LOADING; 1599 bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, 1600 ahci_dmasetprd, slot, 0); 1601 } else { 1602 slot->dma.nsegs = 0; 1603 ahci_execute_transaction(slot); 1604 } 1605 } 1606 1607 /* Locked by busdma engine. */ 1608 static void 1609 ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1610 { 1611 struct ahci_slot *slot = arg; 1612 struct ahci_channel *ch = slot->ch; 1613 struct ahci_cmd_tab *ctp; 1614 struct ahci_dma_prd *prd; 1615 int i; 1616 1617 if (error) { 1618 device_printf(ch->dev, "DMA load error\n"); 1619 ahci_end_transaction(slot, AHCI_ERR_INVALID); 1620 return; 1621 } 1622 KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); 1623 /* Get a piece of the workspace for this request */ 1624 ctp = (struct ahci_cmd_tab *) 1625 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); 1626 /* Fill S/G table */ 1627 prd = &ctp->prd_tab[0]; 1628 for (i = 0; i < nsegs; i++) { 1629 prd[i].dba = htole64(segs[i].ds_addr); 1630 prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); 1631 } 1632 slot->dma.nsegs = nsegs; 1633 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, 1634 ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? 1635 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); 1636 ahci_execute_transaction(slot); 1637 } 1638 1639 /* Must be called with channel locked. */ 1640 static void 1641 ahci_execute_transaction(struct ahci_slot *slot) 1642 { 1643 struct ahci_channel *ch = slot->ch; 1644 struct ahci_cmd_tab *ctp; 1645 struct ahci_cmd_list *clp; 1646 union ccb *ccb = slot->ccb; 1647 int port = ccb->ccb_h.target_id & 0x0f; 1648 int fis_size, i, softreset; 1649 uint8_t *fis = ch->dma.rfis + 0x40; 1650 uint8_t val; 1651 uint16_t cmd_flags; 1652 1653 /* Get a piece of the workspace for this request */ 1654 ctp = (struct ahci_cmd_tab *) 1655 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); 1656 /* Setup the FIS for this request */ 1657 if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) { 1658 device_printf(ch->dev, "Setting up SATA FIS failed\n"); 1659 ahci_end_transaction(slot, AHCI_ERR_INVALID); 1660 return; 1661 } 1662 /* Setup the command list entry */ 1663 clp = (struct ahci_cmd_list *) 1664 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); 1665 cmd_flags = 1666 (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | 1667 (ccb->ccb_h.func_code == XPT_SCSI_IO ? 1668 (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) | 1669 (fis_size / sizeof(u_int32_t)) | 1670 (port << 12); 1671 clp->prd_length = htole16(slot->dma.nsegs); 1672 /* Special handling for Soft Reset command. 
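	 * The first phase (SRST set) needs a clean command engine and the
	 * RESET/CLR_BUSY flags in the command header; the second phase (SRST
	 * cleared) is polled below and its completion is detected through the
	 * FIS receive area, which is pre-filled with 0xff for that purpose.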
	 */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
		if (ccb->ataio.cmd.control & ATA_A_RESET) {
			softreset = 1;
			/* Kick controller into sane state */
			ahci_stop(ch);
			ahci_clo(ch);
			ahci_start(ch, 0);
			cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY;
		} else {
			softreset = 2;
			/* Prepare FIS receive area for check. */
			for (i = 0; i < 20; i++)
				fis[i] = 0xff;
		}
	} else
		softreset = 0;
	clp->bytecount = 0;
	clp->cmd_flags = htole16(cmd_flags);
	clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET +
	    (AHCI_CT_SIZE * slot->slot));
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map,
	    BUS_DMASYNC_PREREAD);
	/* Set ACTIVE bit for NCQ commands. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot);
	}
	/* If FBS is enabled, set PMP port. */
	if (ch->fbs_enabled) {
		ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN |
		    (port << AHCI_P_FBS_DEV_SHIFT));
	}
	/* Issue command to the controller. */
	slot->state = AHCI_SLOT_RUNNING;
	ch->rslots |= (1 << slot->slot);
	ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot));
	/* Device reset commands don't interrupt. Poll them. */
	if (ccb->ccb_h.func_code == XPT_ATA_IO &&
	    (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) {
		int count, timeout = ccb->ccb_h.timeout * 100;
		enum ahci_err_type et = AHCI_ERR_NONE;

		for (count = 0; count < timeout; count++) {
			DELAY(10);
			if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot)))
				break;
			if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) &&
			    softreset != 1) {
#if 0
				device_printf(ch->dev,
				    "Poll error on slot %d, TFD: %04x\n",
				    slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD));
#endif
				et = AHCI_ERR_TFE;
				break;
			}
			/* Workaround for ATI SB600/SB700 chipsets. */
			if (ccb->ccb_h.target_id == 15 &&
			    (ch->quirks & AHCI_Q_ATI_PMP_BUG) &&
			    (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) {
				et = AHCI_ERR_TIMEOUT;
				break;
			}
		}

		/*
		 * Some Marvell controllers require additional time
		 * after soft reset to work properly. Set up a 50ms
		 * delay after soft reset.
		 */
		if (ch->quirks & AHCI_Q_MRVL_SR_DEL)
			DELAY(50000);

		/*
		 * Marvell HBAs with non-RAID firmware do not wait for
		 * readiness after soft reset, so we have to wait here.
		 * Marvell RAIDs do not have this problem, but instead
		 * sometimes forget to update the FIS receive area, breaking
		 * this wait.
1755 */ 1756 if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 && 1757 (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 && 1758 softreset == 2 && et == AHCI_ERR_NONE) { 1759 for ( ; count < timeout; count++) { 1760 bus_dmamap_sync(ch->dma.rfis_tag, 1761 ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); 1762 val = fis[2]; 1763 bus_dmamap_sync(ch->dma.rfis_tag, 1764 ch->dma.rfis_map, BUS_DMASYNC_PREREAD); 1765 if ((val & ATA_S_BUSY) == 0) 1766 break; 1767 DELAY(10); 1768 } 1769 } 1770 1771 if (timeout && (count >= timeout)) { 1772 device_printf(ch->dev, "Poll timeout on slot %d port %d\n", 1773 slot->slot, port); 1774 device_printf(ch->dev, "is %08x cs %08x ss %08x " 1775 "rs %08x tfd %02x serr %08x cmd %08x\n", 1776 ATA_INL(ch->r_mem, AHCI_P_IS), 1777 ATA_INL(ch->r_mem, AHCI_P_CI), 1778 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, 1779 ATA_INL(ch->r_mem, AHCI_P_TFD), 1780 ATA_INL(ch->r_mem, AHCI_P_SERR), 1781 ATA_INL(ch->r_mem, AHCI_P_CMD)); 1782 et = AHCI_ERR_TIMEOUT; 1783 } 1784 1785 /* Kick controller into sane state and enable FBS. */ 1786 if (softreset == 2) 1787 ch->eslots |= (1 << slot->slot); 1788 ahci_end_transaction(slot, et); 1789 return; 1790 } 1791 /* Start command execution timeout */ 1792 callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2, 1793 0, (timeout_t*)ahci_timeout, slot, 0); 1794 return; 1795 } 1796 1797 /* Must be called with channel locked. */ 1798 static void 1799 ahci_process_timeout(struct ahci_channel *ch) 1800 { 1801 int i; 1802 1803 mtx_assert(&ch->mtx, MA_OWNED); 1804 /* Handle the rest of commands. */ 1805 for (i = 0; i < ch->numslots; i++) { 1806 /* Do we have a running request on slot? */ 1807 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 1808 continue; 1809 ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); 1810 } 1811 } 1812 1813 /* Must be called with channel locked. */ 1814 static void 1815 ahci_rearm_timeout(struct ahci_channel *ch) 1816 { 1817 int i; 1818 1819 mtx_assert(&ch->mtx, MA_OWNED); 1820 for (i = 0; i < ch->numslots; i++) { 1821 struct ahci_slot *slot = &ch->slot[i]; 1822 1823 /* Do we have a running request on slot? */ 1824 if (slot->state < AHCI_SLOT_RUNNING) 1825 continue; 1826 if ((ch->toslots & (1 << i)) == 0) 1827 continue; 1828 callout_reset_sbt(&slot->timeout, 1829 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, 1830 (timeout_t*)ahci_timeout, slot, 0); 1831 } 1832 } 1833 1834 /* Locked by callout mechanism. */ 1835 static void 1836 ahci_timeout(struct ahci_slot *slot) 1837 { 1838 struct ahci_channel *ch = slot->ch; 1839 device_t dev = ch->dev; 1840 uint32_t sstatus; 1841 int ccs; 1842 int i; 1843 1844 /* Check for stale timeout. */ 1845 if (slot->state < AHCI_SLOT_RUNNING) 1846 return; 1847 1848 /* Check if slot was not being executed last time we checked. */ 1849 if (slot->state < AHCI_SLOT_EXECUTING) { 1850 /* Check if slot started executing. 
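		 * If the command has not visibly started executing (its bit
		 * is not set in PxSACT and it is not the current command slot,
		 * PxCMD.CCS), give it another timeout interval instead of
		 * treating this as a real failure.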
		 */
		sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT);
		ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK)
		    >> AHCI_P_CMD_CCS_SHIFT;
		if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot ||
		    ch->fbs_enabled || ch->wrongccs)
			slot->state = AHCI_SLOT_EXECUTING;
		else if ((ch->rslots & (1 << ccs)) == 0) {
			ch->wrongccs = 1;
			slot->state = AHCI_SLOT_EXECUTING;
		}

		callout_reset_sbt(&slot->timeout,
		    SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0,
		    (timeout_t*)ahci_timeout, slot, 0);
		return;
	}

	device_printf(dev, "Timeout on slot %d port %d\n",
	    slot->slot, slot->ccb->ccb_h.target_id & 0x0f);
	device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x "
	    "serr %08x cmd %08x\n",
	    ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI),
	    ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots,
	    ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR),
	    ATA_INL(ch->r_mem, AHCI_P_CMD));

	/* Handle frozen command. */
	if (ch->frozen) {
		union ccb *fccb = ch->frozen;
		ch->frozen = NULL;
		fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
		if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
			xpt_freeze_devq(fccb->ccb_h.path, 1);
			fccb->ccb_h.status |= CAM_DEV_QFRZN;
		}
		ahci_done(ch, fccb);
	}
	if (!ch->fbs_enabled && !ch->wrongccs) {
		/* Without FBS we know the real source of the timeout. */
		ch->fatalerr = 1;
		/* Handle the command that timed out. */
		ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT);
		/* Handle the rest of the commands. */
		for (i = 0; i < ch->numslots; i++) {
			/* Do we have a running request on slot? */
			if (ch->slot[i].state < AHCI_SLOT_RUNNING)
				continue;
			ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
		}
	} else {
		/* With FBS we wait for the other commands to time out, and pray. */
		if (ch->toslots == 0)
			xpt_freeze_simq(ch->sim, 1);
		ch->toslots |= (1 << slot->slot);
		if ((ch->rslots & ~ch->toslots) == 0)
			ahci_process_timeout(ch);
		else
			device_printf(dev, " ... waiting for slots %08x\n",
			    ch->rslots & ~ch->toslots);
	}
}

/* Must be called with channel locked. */
static void
ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et)
{
	struct ahci_channel *ch = slot->ch;
	union ccb *ccb = slot->ccb;
	struct ahci_cmd_list *clp;
	int lastto;
	uint32_t sig;

	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	clp = (struct ahci_cmd_list *)
	    (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot));
	/* Read the result registers into the result structure.
	 * They may be incorrect if several commands finished at the same
	 * time, so read them only when we are sure, or when we have to.
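	 * With NCQ the taskfile in PxTFD may describe a different command,
	 * and with FBS the receive area holds a separate 256-byte FIS region
	 * per PMP target, which is why the code below prefers the received
	 * FIS copy whenever FBS is enabled.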
1930 */ 1931 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1932 struct ata_res *res = &ccb->ataio.res; 1933 1934 if ((et == AHCI_ERR_TFE) || 1935 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { 1936 u_int8_t *fis = ch->dma.rfis + 0x40; 1937 1938 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, 1939 BUS_DMASYNC_POSTREAD); 1940 if (ch->fbs_enabled) { 1941 fis += ccb->ccb_h.target_id * 256; 1942 res->status = fis[2]; 1943 res->error = fis[3]; 1944 } else { 1945 uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD); 1946 1947 res->status = tfd; 1948 res->error = tfd >> 8; 1949 } 1950 res->lba_low = fis[4]; 1951 res->lba_mid = fis[5]; 1952 res->lba_high = fis[6]; 1953 res->device = fis[7]; 1954 res->lba_low_exp = fis[8]; 1955 res->lba_mid_exp = fis[9]; 1956 res->lba_high_exp = fis[10]; 1957 res->sector_count = fis[12]; 1958 res->sector_count_exp = fis[13]; 1959 1960 /* 1961 * Some weird controllers do not return signature in 1962 * FIS receive area. Read it from PxSIG register. 1963 */ 1964 if ((ch->quirks & AHCI_Q_ALTSIG) && 1965 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1966 (ccb->ataio.cmd.control & ATA_A_RESET) == 0) { 1967 sig = ATA_INL(ch->r_mem, AHCI_P_SIG); 1968 res->lba_high = sig >> 24; 1969 res->lba_mid = sig >> 16; 1970 res->lba_low = sig >> 8; 1971 res->sector_count = sig; 1972 } 1973 } else 1974 bzero(res, sizeof(*res)); 1975 if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 && 1976 (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1977 (ch->quirks & AHCI_Q_NOCOUNT) == 0) { 1978 ccb->ataio.resid = 1979 ccb->ataio.dxfer_len - le32toh(clp->bytecount); 1980 } 1981 } else { 1982 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1983 (ch->quirks & AHCI_Q_NOCOUNT) == 0) { 1984 ccb->csio.resid = 1985 ccb->csio.dxfer_len - le32toh(clp->bytecount); 1986 } 1987 } 1988 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1989 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, 1990 (ccb->ccb_h.flags & CAM_DIR_IN) ? 1991 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1992 bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); 1993 } 1994 if (et != AHCI_ERR_NONE) 1995 ch->eslots |= (1 << slot->slot); 1996 /* In case of error, freeze device for proper recovery. */ 1997 if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) && 1998 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 1999 xpt_freeze_devq(ccb->ccb_h.path, 1); 2000 ccb->ccb_h.status |= CAM_DEV_QFRZN; 2001 } 2002 /* Set proper result status. 
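	   The switch below maps the internal ahci_err_type to a CAM status;
	   for SATA and timeout errors it also freezes the SIM queue (to be
	   released via CAM_RELEASE_SIMQ) so that recovery can run first.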
	 */
	ccb->ccb_h.status &= ~CAM_STATUS_MASK;
	switch (et) {
	case AHCI_ERR_NONE:
		ccb->ccb_h.status |= CAM_REQ_CMP;
		if (ccb->ccb_h.func_code == XPT_SCSI_IO)
			ccb->csio.scsi_status = SCSI_STATUS_OK;
		break;
	case AHCI_ERR_INVALID:
		ch->fatalerr = 1;
		ccb->ccb_h.status |= CAM_REQ_INVALID;
		break;
	case AHCI_ERR_INNOCENT:
		ccb->ccb_h.status |= CAM_REQUEUE_REQ;
		break;
	case AHCI_ERR_TFE:
	case AHCI_ERR_NCQ:
		if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
			ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
		} else {
			ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
		}
		break;
	case AHCI_ERR_SATA:
		ch->fatalerr = 1;
		if (!ch->recoverycmd) {
			xpt_freeze_simq(ch->sim, 1);
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}
		ccb->ccb_h.status |= CAM_UNCOR_PARITY;
		break;
	case AHCI_ERR_TIMEOUT:
		if (!ch->recoverycmd) {
			xpt_freeze_simq(ch->sim, 1);
			ccb->ccb_h.status &= ~CAM_STATUS_MASK;
			ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
		}
		ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
		break;
	default:
		ch->fatalerr = 1;
		ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
	}
	/* Free slot. */
	ch->oslots &= ~(1 << slot->slot);
	ch->rslots &= ~(1 << slot->slot);
	ch->aslots &= ~(1 << slot->slot);
	slot->state = AHCI_SLOT_EMPTY;
	slot->ccb = NULL;
	/* Update channel stats. */
	ch->numrslots--;
	ch->numrslotspd[ccb->ccb_h.target_id]--;
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
		ch->numtslots--;
		ch->numtslotspd[ccb->ccb_h.target_id]--;
	}
	/* Cancel timeout state if request completed normally. */
	if (et != AHCI_ERR_TIMEOUT) {
		lastto = (ch->toslots == (1 << slot->slot));
		ch->toslots &= ~(1 << slot->slot);
		if (lastto)
			xpt_release_simq(ch->sim, TRUE);
	}
	/* If this was the first request of a reset sequence and there was
	 * no error, proceed to the second request. */
	if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
	    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
	    (ccb->ataio.cmd.control & ATA_A_RESET) &&
	    et == AHCI_ERR_NONE) {
		ccb->ataio.cmd.control &= ~ATA_A_RESET;
		ahci_begin_transaction(ch, ccb);
		return;
	}
	/* If it was our READ LOG command - process it. */
	if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) {
		ahci_process_read_log(ch, ccb);
	/* If it was our REQUEST SENSE command - process it. */
	} else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) {
		ahci_process_request_sense(ch, ccb);
	/* If it was an NCQ or ATAPI command error, put the result on hold. */
	} else if (et == AHCI_ERR_NCQ ||
	    ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR &&
	     (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) {
		ch->hold[slot->slot] = ccb;
		ch->numhslots++;
	} else
		ahci_done(ch, ccb);
	/* If we have no other active commands, ... */
	if (ch->rslots == 0) {
		/* if there was a fatal error - reset the port. */
		if (ch->toslots != 0 || ch->fatalerr) {
			ahci_reset(ch);
		} else {
			/* if we have slots in error, we can reinit the port. */
			if (ch->eslots != 0) {
				ahci_stop(ch);
				ahci_clo(ch);
				ahci_start(ch, 1);
			}
			/* if there are commands on hold, we can do READ LOG.
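			   READ LOG EXT page 10h reports which NCQ tag failed,
			   so held commands can be completed with the correct
			   status; see ahci_issue_recovery() and
			   ahci_process_read_log() below.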
*/ 2105 if (!ch->recoverycmd && ch->numhslots) 2106 ahci_issue_recovery(ch); 2107 } 2108 /* If all the rest of commands are in timeout - give them chance. */ 2109 } else if ((ch->rslots & ~ch->toslots) == 0 && 2110 et != AHCI_ERR_TIMEOUT) 2111 ahci_rearm_timeout(ch); 2112 /* Unfreeze frozen command. */ 2113 if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) { 2114 union ccb *fccb = ch->frozen; 2115 ch->frozen = NULL; 2116 ahci_begin_transaction(ch, fccb); 2117 xpt_release_simq(ch->sim, TRUE); 2118 } 2119 /* Start PM timer. */ 2120 if (ch->numrslots == 0 && ch->pm_level > 3 && 2121 (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { 2122 callout_schedule(&ch->pm_timer, 2123 (ch->pm_level == 4) ? hz / 1000 : hz / 8); 2124 } 2125 } 2126 2127 static void 2128 ahci_issue_recovery(struct ahci_channel *ch) 2129 { 2130 union ccb *ccb; 2131 struct ccb_ataio *ataio; 2132 struct ccb_scsiio *csio; 2133 int i; 2134 2135 /* Find some held command. */ 2136 for (i = 0; i < ch->numslots; i++) { 2137 if (ch->hold[i]) 2138 break; 2139 } 2140 ccb = xpt_alloc_ccb_nowait(); 2141 if (ccb == NULL) { 2142 device_printf(ch->dev, "Unable to allocate recovery command\n"); 2143 completeall: 2144 /* We can't do anything -- complete held commands. */ 2145 for (i = 0; i < ch->numslots; i++) { 2146 if (ch->hold[i] == NULL) 2147 continue; 2148 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2149 ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; 2150 ahci_done(ch, ch->hold[i]); 2151 ch->hold[i] = NULL; 2152 ch->numhslots--; 2153 } 2154 ahci_reset(ch); 2155 return; 2156 } 2157 ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ 2158 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 2159 /* READ LOG */ 2160 ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; 2161 ccb->ccb_h.func_code = XPT_ATA_IO; 2162 ccb->ccb_h.flags = CAM_DIR_IN; 2163 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ 2164 ataio = &ccb->ataio; 2165 ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); 2166 if (ataio->data_ptr == NULL) { 2167 xpt_free_ccb(ccb); 2168 device_printf(ch->dev, 2169 "Unable to allocate memory for READ LOG command\n"); 2170 goto completeall; 2171 } 2172 ataio->dxfer_len = 512; 2173 bzero(&ataio->cmd, sizeof(ataio->cmd)); 2174 ataio->cmd.flags = CAM_ATAIO_48BIT; 2175 ataio->cmd.command = 0x2F; /* READ LOG EXT */ 2176 ataio->cmd.sector_count = 1; 2177 ataio->cmd.sector_count_exp = 0; 2178 ataio->cmd.lba_low = 0x10; 2179 ataio->cmd.lba_mid = 0; 2180 ataio->cmd.lba_mid_exp = 0; 2181 } else { 2182 /* REQUEST SENSE */ 2183 ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; 2184 ccb->ccb_h.recovery_slot = i; 2185 ccb->ccb_h.func_code = XPT_SCSI_IO; 2186 ccb->ccb_h.flags = CAM_DIR_IN; 2187 ccb->ccb_h.status = 0; 2188 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ 2189 csio = &ccb->csio; 2190 csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; 2191 csio->dxfer_len = ch->hold[i]->csio.sense_len; 2192 csio->cdb_len = 6; 2193 bzero(&csio->cdb_io, sizeof(csio->cdb_io)); 2194 csio->cdb_io.cdb_bytes[0] = 0x03; 2195 csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; 2196 } 2197 /* Freeze SIM while doing recovery. 
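	   Only one recovery command is outstanding at a time; recoverycmd is
	   cleared and the SIM queue released again in ahci_process_read_log()
	   or ahci_process_request_sense().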
*/ 2198 ch->recoverycmd = 1; 2199 xpt_freeze_simq(ch->sim, 1); 2200 ahci_begin_transaction(ch, ccb); 2201 } 2202 2203 static void 2204 ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb) 2205 { 2206 uint8_t *data; 2207 struct ata_res *res; 2208 int i; 2209 2210 ch->recoverycmd = 0; 2211 2212 data = ccb->ataio.data_ptr; 2213 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2214 (data[0] & 0x80) == 0) { 2215 for (i = 0; i < ch->numslots; i++) { 2216 if (!ch->hold[i]) 2217 continue; 2218 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) 2219 continue; 2220 if ((data[0] & 0x1F) == i) { 2221 res = &ch->hold[i]->ataio.res; 2222 res->status = data[2]; 2223 res->error = data[3]; 2224 res->lba_low = data[4]; 2225 res->lba_mid = data[5]; 2226 res->lba_high = data[6]; 2227 res->device = data[7]; 2228 res->lba_low_exp = data[8]; 2229 res->lba_mid_exp = data[9]; 2230 res->lba_high_exp = data[10]; 2231 res->sector_count = data[12]; 2232 res->sector_count_exp = data[13]; 2233 } else { 2234 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2235 ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; 2236 } 2237 ahci_done(ch, ch->hold[i]); 2238 ch->hold[i] = NULL; 2239 ch->numhslots--; 2240 } 2241 } else { 2242 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 2243 device_printf(ch->dev, "Error while READ LOG EXT\n"); 2244 else if ((data[0] & 0x80) == 0) { 2245 device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n"); 2246 } 2247 for (i = 0; i < ch->numslots; i++) { 2248 if (!ch->hold[i]) 2249 continue; 2250 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) 2251 continue; 2252 ahci_done(ch, ch->hold[i]); 2253 ch->hold[i] = NULL; 2254 ch->numhslots--; 2255 } 2256 } 2257 free(ccb->ataio.data_ptr, M_AHCI); 2258 xpt_free_ccb(ccb); 2259 xpt_release_simq(ch->sim, TRUE); 2260 } 2261 2262 static void 2263 ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb) 2264 { 2265 int i; 2266 2267 ch->recoverycmd = 0; 2268 2269 i = ccb->ccb_h.recovery_slot; 2270 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 2271 ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; 2272 } else { 2273 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2274 ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2275 } 2276 ahci_done(ch, ch->hold[i]); 2277 ch->hold[i] = NULL; 2278 ch->numhslots--; 2279 xpt_free_ccb(ccb); 2280 xpt_release_simq(ch->sim, TRUE); 2281 } 2282 2283 static void 2284 ahci_start(struct ahci_channel *ch, int fbs) 2285 { 2286 u_int32_t cmd; 2287 2288 /* Run the channel start callback, if any. */ 2289 if (ch->start) 2290 ch->start(ch); 2291 2292 /* Clear SATA error register */ 2293 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); 2294 /* Clear any interrupts pending on this channel */ 2295 ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); 2296 /* Configure FIS-based switching if supported. */ 2297 if (ch->chcaps & AHCI_P_CMD_FBSCP) { 2298 ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; 2299 ATA_OUTL(ch->r_mem, AHCI_P_FBS, 2300 ch->fbs_enabled ? AHCI_P_FBS_EN : 0); 2301 } 2302 /* Start operations on this channel */ 2303 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2304 cmd &= ~AHCI_P_CMD_PMA; 2305 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | 2306 (ch->pm_present ? AHCI_P_CMD_PMA : 0)); 2307 } 2308 2309 static void 2310 ahci_stop(struct ahci_channel *ch) 2311 { 2312 u_int32_t cmd; 2313 int timeout; 2314 2315 /* Kill all activity on this channel */ 2316 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2317 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); 2318 /* Wait for activity stop. 
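	   That is, poll until the HBA clears PxCMD.CR to acknowledge that the
	   command list engine has stopped (up to roughly 500ms here).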
	 */
	timeout = 0;
	do {
		DELAY(10);
		if (timeout++ > 50000) {
			device_printf(ch->dev, "stopping AHCI engine failed\n");
			break;
		}
	} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR);
	ch->eslots = 0;
}

static void
ahci_clo(struct ahci_channel *ch)
{
	u_int32_t cmd;
	int timeout;

	/* Issue Command List Override if supported */
	if (ch->caps & AHCI_CAP_SCLO) {
		cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
		cmd |= AHCI_P_CMD_CLO;
		ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd);
		timeout = 0;
		do {
			DELAY(10);
			if (timeout++ > 50000) {
				device_printf(ch->dev, "executing CLO failed\n");
				break;
			}
		} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO);
	}
}

static void
ahci_stop_fr(struct ahci_channel *ch)
{
	u_int32_t cmd;
	int timeout;

	/* Kill all FIS reception on this channel */
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE);
	/* Wait for FIS reception stop. */
	timeout = 0;
	do {
		DELAY(10);
		if (timeout++ > 50000) {
			device_printf(ch->dev, "stopping AHCI FR engine failed\n");
			break;
		}
	} while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR);
}

static void
ahci_start_fr(struct ahci_channel *ch)
{
	u_int32_t cmd;

	/* Start FIS reception on this channel */
	cmd = ATA_INL(ch->r_mem, AHCI_P_CMD);
	ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE);
}

static int
ahci_wait_ready(struct ahci_channel *ch, int t, int t0)
{
	int timeout = 0;
	uint32_t val;

	while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) &
	    (ATA_S_BUSY | ATA_S_DRQ)) {
		if (timeout > t) {
			if (t != 0) {
				device_printf(ch->dev,
				    "AHCI reset: device not ready after %dms "
				    "(tfd = %08x)\n",
				    MAX(t, 0) + t0, val);
			}
			return (EBUSY);
		}
		DELAY(1000);
		timeout++;
	}
	if (bootverbose)
		device_printf(ch->dev, "AHCI reset: device ready after %dms\n",
		    timeout + t0);
	return (0);
}

static void
ahci_reset_to(void *arg)
{
	struct ahci_channel *ch = arg;

	if (ch->resetting == 0)
		return;
	ch->resetting--;
	if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0,
	    (310 - ch->resetting) * 100) == 0) {
		ch->resetting = 0;
		ahci_start(ch, 1);
		xpt_release_simq(ch->sim, TRUE);
		return;
	}
	if (ch->resetting == 0) {
		ahci_clo(ch);
		ahci_start(ch, 1);
		xpt_release_simq(ch->sim, TRUE);
		return;
	}
	callout_schedule(&ch->reset_timer, hz / 10);
}

static void
ahci_reset(struct ahci_channel *ch)
{
	struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev));
	int i;

	xpt_freeze_simq(ch->sim, 1);
	if (bootverbose)
		device_printf(ch->dev, "AHCI reset...\n");
	/* Forget about previous reset. */
	if (ch->resetting) {
		ch->resetting = 0;
		callout_stop(&ch->reset_timer);
		xpt_release_simq(ch->sim, TRUE);
	}
	/* Requeue the frozen command.
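	   It is completed with CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ below, so
	   CAM will resubmit it once the SIM queue is released.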
	 */
	if (ch->frozen) {
		union ccb *fccb = ch->frozen;
		ch->frozen = NULL;
		fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
		if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
			xpt_freeze_devq(fccb->ccb_h.path, 1);
			fccb->ccb_h.status |= CAM_DEV_QFRZN;
		}
		ahci_done(ch, fccb);
	}
	/* Kill the engine and requeue all running commands. */
	ahci_stop(ch);
	for (i = 0; i < ch->numslots; i++) {
		/* Do we have a running request on slot? */
		if (ch->slot[i].state < AHCI_SLOT_RUNNING)
			continue;
		/* XXX; Commands in loading state. */
		ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT);
	}
	for (i = 0; i < ch->numslots; i++) {
		if (!ch->hold[i])
			continue;
		ahci_done(ch, ch->hold[i]);
		ch->hold[i] = NULL;
		ch->numhslots--;
	}
	if (ch->toslots != 0)
		xpt_release_simq(ch->sim, TRUE);
	ch->eslots = 0;
	ch->toslots = 0;
	ch->wrongccs = 0;
	ch->fatalerr = 0;
	/* Tell the XPT about the event */
	xpt_async(AC_BUS_RESET, ch->path, NULL);
	/* Disable port interrupts */
	ATA_OUTL(ch->r_mem, AHCI_P_IE, 0);
	/* Reset and reconnect the PHY. */
	if (!ahci_sata_phy_reset(ch)) {
		if (bootverbose)
			device_printf(ch->dev,
			    "AHCI reset: device not found\n");
		ch->devices = 0;
		/* Enable wanted port interrupts */
		ATA_OUTL(ch->r_mem, AHCI_P_IE,
		    (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
		    AHCI_P_IX_PRC | AHCI_P_IX_PC));
		xpt_release_simq(ch->sim, TRUE);
		return;
	}
	if (bootverbose)
		device_printf(ch->dev, "AHCI reset: device found\n");
	/* Wait for the busy status to clear. */
	if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) {
		if (dumping)
			ahci_clo(ch);
		else
			ch->resetting = 310;
	}
	ch->devices = 1;
	/* Enable wanted port interrupts */
	ATA_OUTL(ch->r_mem, AHCI_P_IE,
	    (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) |
	    AHCI_P_IX_TFE | AHCI_P_IX_HBF |
	    AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF |
	    ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC |
	    AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) |
	    AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR)));
	if (ch->resetting)
		callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch);
	else {
		ahci_start(ch, 1);
		xpt_release_simq(ch->sim, TRUE);
	}
}

static int
ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag)
{
	u_int8_t *fis = &ctp->cfis[0];

	bzero(fis, 20);
	fis[0] = 0x27;			/* host to device */
	fis[1] = (ccb->ccb_h.target_id & 0x0f);
	if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
		fis[1] |= 0x80;
		fis[2] = ATA_PACKET_CMD;
		if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
		    ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA)
			fis[3] = ATA_F_DMA;
		else {
			fis[5] = ccb->csio.dxfer_len;
			fis[6] = ccb->csio.dxfer_len >> 8;
		}
		fis[7] = ATA_D_LBA;
		fis[15] = ATA_A_4BIT;
		bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
2544 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, 2545 ctp->acmd, ccb->csio.cdb_len); 2546 bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len); 2547 } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { 2548 fis[1] |= 0x80; 2549 fis[2] = ccb->ataio.cmd.command; 2550 fis[3] = ccb->ataio.cmd.features; 2551 fis[4] = ccb->ataio.cmd.lba_low; 2552 fis[5] = ccb->ataio.cmd.lba_mid; 2553 fis[6] = ccb->ataio.cmd.lba_high; 2554 fis[7] = ccb->ataio.cmd.device; 2555 fis[8] = ccb->ataio.cmd.lba_low_exp; 2556 fis[9] = ccb->ataio.cmd.lba_mid_exp; 2557 fis[10] = ccb->ataio.cmd.lba_high_exp; 2558 fis[11] = ccb->ataio.cmd.features_exp; 2559 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { 2560 fis[12] = tag << 3; 2561 } else { 2562 fis[12] = ccb->ataio.cmd.sector_count; 2563 } 2564 fis[13] = ccb->ataio.cmd.sector_count_exp; 2565 fis[15] = ATA_A_4BIT; 2566 } else { 2567 fis[15] = ccb->ataio.cmd.control; 2568 } 2569 if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { 2570 fis[16] = ccb->ataio.aux & 0xff; 2571 fis[17] = (ccb->ataio.aux >> 8) & 0xff; 2572 fis[18] = (ccb->ataio.aux >> 16) & 0xff; 2573 fis[19] = (ccb->ataio.aux >> 24) & 0xff; 2574 } 2575 return (20); 2576 } 2577 2578 static int 2579 ahci_sata_connect(struct ahci_channel *ch) 2580 { 2581 u_int32_t status; 2582 int timeout, found = 0; 2583 2584 /* Wait up to 100ms for "connect well" */ 2585 for (timeout = 0; timeout < 1000 ; timeout++) { 2586 status = ATA_INL(ch->r_mem, AHCI_P_SSTS); 2587 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) 2588 found = 1; 2589 if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && 2590 ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && 2591 ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) 2592 break; 2593 if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { 2594 if (bootverbose) { 2595 device_printf(ch->dev, "SATA offline status=%08x\n", 2596 status); 2597 } 2598 return (0); 2599 } 2600 if (found == 0 && timeout >= 100) 2601 break; 2602 DELAY(100); 2603 } 2604 if (timeout >= 1000 || !found) { 2605 if (bootverbose) { 2606 device_printf(ch->dev, 2607 "SATA connect timeout time=%dus status=%08x\n", 2608 timeout * 100, status); 2609 } 2610 return (0); 2611 } 2612 if (bootverbose) { 2613 device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", 2614 timeout * 100, status); 2615 } 2616 /* Clear SATA error register */ 2617 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); 2618 return (1); 2619 } 2620 2621 static int 2622 ahci_sata_phy_reset(struct ahci_channel *ch) 2623 { 2624 int sata_rev; 2625 uint32_t val, detval; 2626 2627 if (ch->listening) { 2628 val = ATA_INL(ch->r_mem, AHCI_P_CMD); 2629 val |= AHCI_P_CMD_SUD; 2630 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); 2631 ch->listening = 0; 2632 } 2633 sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; 2634 if (sata_rev == 1) 2635 val = ATA_SC_SPD_SPEED_GEN1; 2636 else if (sata_rev == 2) 2637 val = ATA_SC_SPD_SPEED_GEN2; 2638 else if (sata_rev == 3) 2639 val = ATA_SC_SPD_SPEED_GEN3; 2640 else 2641 val = 0; 2642 detval = ahci_ch_detval(ch, ATA_SC_DET_RESET); 2643 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 2644 detval | val | 2645 ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); 2646 DELAY(1000); 2647 detval = ahci_ch_detval(ch, ATA_SC_DET_IDLE); 2648 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 2649 detval | val | ((ch->pm_level > 0) ? 
0 : 2650 (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); 2651 if (!ahci_sata_connect(ch)) { 2652 if (ch->caps & AHCI_CAP_SSS) { 2653 val = ATA_INL(ch->r_mem, AHCI_P_CMD); 2654 val &= ~AHCI_P_CMD_SUD; 2655 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); 2656 ch->listening = 1; 2657 } else if (ch->pm_level > 0) 2658 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); 2659 return (0); 2660 } 2661 return (1); 2662 } 2663 2664 static int 2665 ahci_check_ids(struct ahci_channel *ch, union ccb *ccb) 2666 { 2667 2668 if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) { 2669 ccb->ccb_h.status = CAM_TID_INVALID; 2670 ahci_done(ch, ccb); 2671 return (-1); 2672 } 2673 if (ccb->ccb_h.target_lun != 0) { 2674 ccb->ccb_h.status = CAM_LUN_INVALID; 2675 ahci_done(ch, ccb); 2676 return (-1); 2677 } 2678 return (0); 2679 } 2680 2681 static void 2682 ahciaction(struct cam_sim *sim, union ccb *ccb) 2683 { 2684 struct ahci_channel *ch; 2685 2686 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", 2687 ccb->ccb_h.func_code)); 2688 2689 ch = (struct ahci_channel *)cam_sim_softc(sim); 2690 switch (ccb->ccb_h.func_code) { 2691 /* Common cases first */ 2692 case XPT_ATA_IO: /* Execute the requested I/O operation */ 2693 case XPT_SCSI_IO: 2694 if (ahci_check_ids(ch, ccb)) 2695 return; 2696 if (ch->devices == 0 || 2697 (ch->pm_present == 0 && 2698 ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { 2699 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 2700 break; 2701 } 2702 ccb->ccb_h.recovery_type = RECOVERY_NONE; 2703 /* Check for command collision. */ 2704 if (ahci_check_collision(ch, ccb)) { 2705 /* Freeze command. */ 2706 ch->frozen = ccb; 2707 /* We have only one frozen slot, so freeze simq also. */ 2708 xpt_freeze_simq(ch->sim, 1); 2709 return; 2710 } 2711 ahci_begin_transaction(ch, ccb); 2712 return; 2713 case XPT_ABORT: /* Abort the specified CCB */ 2714 /* XXX Implement */ 2715 ccb->ccb_h.status = CAM_REQ_INVALID; 2716 break; 2717 case XPT_SET_TRAN_SETTINGS: 2718 { 2719 struct ccb_trans_settings *cts = &ccb->cts; 2720 struct ahci_device *d; 2721 2722 if (ahci_check_ids(ch, ccb)) 2723 return; 2724 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 2725 d = &ch->curr[ccb->ccb_h.target_id]; 2726 else 2727 d = &ch->user[ccb->ccb_h.target_id]; 2728 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) 2729 d->revision = cts->xport_specific.sata.revision; 2730 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) 2731 d->mode = cts->xport_specific.sata.mode; 2732 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) 2733 d->bytecount = min(8192, cts->xport_specific.sata.bytecount); 2734 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) 2735 d->tags = min(ch->numslots, cts->xport_specific.sata.tags); 2736 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) 2737 ch->pm_present = cts->xport_specific.sata.pm_present; 2738 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) 2739 d->atapi = cts->xport_specific.sata.atapi; 2740 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) 2741 d->caps = cts->xport_specific.sata.caps; 2742 ccb->ccb_h.status = CAM_REQ_CMP; 2743 break; 2744 } 2745 case XPT_GET_TRAN_SETTINGS: 2746 /* Get default/user set transfer settings for the target */ 2747 { 2748 struct ccb_trans_settings *cts = &ccb->cts; 2749 struct ahci_device *d; 2750 uint32_t status; 2751 2752 if (ahci_check_ids(ch, ccb)) 2753 return; 2754 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 2755 d = &ch->curr[ccb->ccb_h.target_id]; 2756 else 2757 d = 
&ch->user[ccb->ccb_h.target_id]; 2758 cts->protocol = PROTO_UNSPECIFIED; 2759 cts->protocol_version = PROTO_VERSION_UNSPECIFIED; 2760 cts->transport = XPORT_SATA; 2761 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 2762 cts->proto_specific.valid = 0; 2763 cts->xport_specific.sata.valid = 0; 2764 if (cts->type == CTS_TYPE_CURRENT_SETTINGS && 2765 (ccb->ccb_h.target_id == 15 || 2766 (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { 2767 status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; 2768 if (status & 0x0f0) { 2769 cts->xport_specific.sata.revision = 2770 (status & 0x0f0) >> 4; 2771 cts->xport_specific.sata.valid |= 2772 CTS_SATA_VALID_REVISION; 2773 } 2774 cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; 2775 if (ch->pm_level) { 2776 if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) 2777 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; 2778 if (ch->caps2 & AHCI_CAP2_APST) 2779 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; 2780 } 2781 if ((ch->caps & AHCI_CAP_SNCQ) && 2782 (ch->quirks & AHCI_Q_NOAA) == 0) 2783 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; 2784 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; 2785 cts->xport_specific.sata.caps &= 2786 ch->user[ccb->ccb_h.target_id].caps; 2787 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 2788 } else { 2789 cts->xport_specific.sata.revision = d->revision; 2790 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; 2791 cts->xport_specific.sata.caps = d->caps; 2792 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 2793 } 2794 cts->xport_specific.sata.mode = d->mode; 2795 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; 2796 cts->xport_specific.sata.bytecount = d->bytecount; 2797 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; 2798 cts->xport_specific.sata.pm_present = ch->pm_present; 2799 cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; 2800 cts->xport_specific.sata.tags = d->tags; 2801 cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; 2802 cts->xport_specific.sata.atapi = d->atapi; 2803 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; 2804 ccb->ccb_h.status = CAM_REQ_CMP; 2805 break; 2806 } 2807 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 2808 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 2809 ahci_reset(ch); 2810 ccb->ccb_h.status = CAM_REQ_CMP; 2811 break; 2812 case XPT_TERM_IO: /* Terminate the I/O process */ 2813 /* XXX Implement */ 2814 ccb->ccb_h.status = CAM_REQ_INVALID; 2815 break; 2816 case XPT_PATH_INQ: /* Path routing inquiry */ 2817 { 2818 struct ccb_pathinq *cpi = &ccb->cpi; 2819 2820 cpi->version_num = 1; /* XXX??? 
*/ 2821 cpi->hba_inquiry = PI_SDTR_ABLE; 2822 if (ch->caps & AHCI_CAP_SNCQ) 2823 cpi->hba_inquiry |= PI_TAG_ABLE; 2824 if (ch->caps & AHCI_CAP_SPM) 2825 cpi->hba_inquiry |= PI_SATAPM; 2826 cpi->target_sprt = 0; 2827 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; 2828 if ((ch->quirks & AHCI_Q_NOAUX) == 0) 2829 cpi->hba_misc |= PIM_ATA_EXT; 2830 cpi->hba_eng_cnt = 0; 2831 if (ch->caps & AHCI_CAP_SPM) 2832 cpi->max_target = 15; 2833 else 2834 cpi->max_target = 0; 2835 cpi->max_lun = 0; 2836 cpi->initiator_id = 0; 2837 cpi->bus_id = cam_sim_bus(sim); 2838 cpi->base_transfer_speed = 150000; 2839 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2840 strlcpy(cpi->hba_vid, "AHCI", HBA_IDLEN); 2841 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2842 cpi->unit_number = cam_sim_unit(sim); 2843 cpi->transport = XPORT_SATA; 2844 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 2845 cpi->protocol = PROTO_ATA; 2846 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 2847 cpi->maxio = MAXPHYS; 2848 /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ 2849 if (ch->quirks & AHCI_Q_MAXIO_64K) 2850 cpi->maxio = min(cpi->maxio, 128 * 512); 2851 cpi->hba_vendor = ch->vendorid; 2852 cpi->hba_device = ch->deviceid; 2853 cpi->hba_subvendor = ch->subvendorid; 2854 cpi->hba_subdevice = ch->subdeviceid; 2855 cpi->ccb_h.status = CAM_REQ_CMP; 2856 break; 2857 } 2858 default: 2859 ccb->ccb_h.status = CAM_REQ_INVALID; 2860 break; 2861 } 2862 ahci_done(ch, ccb); 2863 } 2864 2865 static void 2866 ahcipoll(struct cam_sim *sim) 2867 { 2868 struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); 2869 uint32_t istatus; 2870 2871 /* Read interrupt statuses and process if any. */ 2872 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 2873 if (istatus != 0) 2874 ahci_ch_intr_main(ch, istatus); 2875 if (ch->resetting != 0 && 2876 (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { 2877 ch->resetpolldiv = 1000; 2878 ahci_reset_to(ch); 2879 } 2880 } 2881 2882 devclass_t ahci_devclass; 2883 2884 MODULE_VERSION(ahci, 1); 2885 MODULE_DEPEND(ahci, cam, 1, 1, 1); 2886
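
/*
 * Example, for illustration only: the bus glue lives in front-ends such as
 * ahci_pci.c, which provide their own driver_t and declare the module against
 * the devclass exported above, typically something like
 *
 *	DRIVER_MODULE(ahci, pci, ahci_driver, ahci_devclass, NULL, NULL);
 *
 * while this file supplies the shared controller, channel and CAM SIM logic.
 */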