1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/module.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/conf.h> 38 #include <sys/endian.h> 39 #include <sys/malloc.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/sysctl.h> 43 #include <machine/stdarg.h> 44 #include <machine/resource.h> 45 #include <machine/bus.h> 46 #include <sys/rman.h> 47 #include "ahci.h" 48 49 #include <cam/cam.h> 50 #include <cam/cam_ccb.h> 51 #include <cam/cam_sim.h> 52 #include <cam/cam_xpt_sim.h> 53 #include <cam/cam_debug.h> 54 55 /* local prototypes */ 56 static void ahci_intr(void *data); 57 static void ahci_intr_one(void *data); 58 static void ahci_intr_one_edge(void *data); 59 static int ahci_ch_init(device_t dev); 60 static int ahci_ch_deinit(device_t dev); 61 static int ahci_ch_suspend(device_t dev); 62 static int ahci_ch_resume(device_t dev); 63 static void ahci_ch_pm(void *arg); 64 static void ahci_ch_intr(void *arg); 65 static void ahci_ch_intr_direct(void *arg); 66 static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); 67 static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb); 68 static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 69 static void ahci_execute_transaction(struct ahci_slot *slot); 70 static void ahci_timeout(struct ahci_slot *slot); 71 static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); 72 static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); 73 static void ahci_dmainit(device_t dev); 74 static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); 75 static void ahci_dmafini(device_t dev); 76 static void ahci_slotsalloc(device_t dev); 77 static void ahci_slotsfree(device_t dev); 78 static void ahci_reset(struct ahci_channel *ch); 79 static void ahci_start(struct ahci_channel *ch, int fbs); 80 static void ahci_stop(struct 
ahci_channel *ch); 81 static void ahci_clo(struct ahci_channel *ch); 82 static void ahci_start_fr(struct ahci_channel *ch); 83 static void ahci_stop_fr(struct ahci_channel *ch); 84 static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr); 85 static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val); 86 87 static int ahci_sata_connect(struct ahci_channel *ch); 88 static int ahci_sata_phy_reset(struct ahci_channel *ch); 89 static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0); 90 91 static void ahci_issue_recovery(struct ahci_channel *ch); 92 static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb); 93 static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb); 94 95 static void ahciaction(struct cam_sim *sim, union ccb *ccb); 96 static void ahcipoll(struct cam_sim *sim); 97 98 static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers"); 99 100 #define recovery_type spriv_field0 101 #define RECOVERY_NONE 0 102 #define RECOVERY_READ_LOG 1 103 #define RECOVERY_REQUEST_SENSE 2 104 #define recovery_slot spriv_field1 105 106 static uint32_t 107 ahci_ch_detval(struct ahci_channel *ch, uint32_t val) 108 { 109 110 return ch->disablephy ? ATA_SC_DET_DISABLE : val; 111 } 112 113 int 114 ahci_ctlr_setup(device_t dev) 115 { 116 struct ahci_controller *ctlr = device_get_softc(dev); 117 /* Clear interrupts */ 118 ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS)); 119 /* Configure CCC */ 120 if (ctlr->ccc) { 121 ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI)); 122 ATA_OUTL(ctlr->r_mem, AHCI_CCCC, 123 (ctlr->ccc << AHCI_CCCC_TV_SHIFT) | 124 (4 << AHCI_CCCC_CC_SHIFT) | 125 AHCI_CCCC_EN); 126 ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) & 127 AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT; 128 if (bootverbose) { 129 device_printf(dev, 130 "CCC with %dms/4cmd enabled on vector %d\n", 131 ctlr->ccc, ctlr->cccv); 132 } 133 } 134 /* Enable AHCI interrupts */ 135 ATA_OUTL(ctlr->r_mem, AHCI_GHC, 136 ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE); 137 return (0); 138 } 139 140 int 141 ahci_ctlr_reset(device_t dev) 142 { 143 struct ahci_controller *ctlr = device_get_softc(dev); 144 int timeout; 145 146 /* Enable AHCI mode */ 147 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); 148 /* Reset AHCI controller */ 149 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR); 150 for (timeout = 1000; timeout > 0; timeout--) { 151 DELAY(1000); 152 if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0) 153 break; 154 } 155 if (timeout == 0) { 156 device_printf(dev, "AHCI controller reset failure\n"); 157 return (ENXIO); 158 } 159 /* Reenable AHCI mode */ 160 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); 161 162 if (ctlr->quirks & AHCI_Q_RESTORE_CAP) { 163 /* 164 * Restore capability field. 165 * This is write to a read-only register to restore its state. 166 * On fully standard-compliant hardware this is not needed and 167 * this operation shall not take place. See ahci_pci.c for 168 * platforms using this quirk. 169 */ 170 ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps); 171 } 172 173 return (0); 174 } 175 176 177 int 178 ahci_attach(device_t dev) 179 { 180 struct ahci_controller *ctlr = device_get_softc(dev); 181 int error, i, speed, unit; 182 uint32_t u, version; 183 device_t child; 184 185 ctlr->dev = dev; 186 ctlr->ccc = 0; 187 resource_int_value(device_get_name(dev), 188 device_get_unit(dev), "ccc", &ctlr->ccc); 189 190 /* Setup our own memory management for channels. 
*/ 191 ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); 192 ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); 193 ctlr->sc_iomem.rm_type = RMAN_ARRAY; 194 ctlr->sc_iomem.rm_descr = "I/O memory addresses"; 195 if ((error = rman_init(&ctlr->sc_iomem)) != 0) { 196 ahci_free_mem(dev); 197 return (error); 198 } 199 if ((error = rman_manage_region(&ctlr->sc_iomem, 200 rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { 201 ahci_free_mem(dev); 202 rman_fini(&ctlr->sc_iomem); 203 return (error); 204 } 205 /* Get the HW capabilities */ 206 version = ATA_INL(ctlr->r_mem, AHCI_VS); 207 ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); 208 if (version >= 0x00010200) 209 ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); 210 if (ctlr->caps & AHCI_CAP_EMS) 211 ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); 212 213 if (ctlr->quirks & AHCI_Q_FORCE_PI) { 214 /* 215 * Enable ports. 216 * The spec says that BIOS sets up bits corresponding to 217 * available ports. On platforms where this information 218 * is missing, the driver can define available ports on its own. 219 */ 220 int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1; 221 int nmask = (1 << nports) - 1; 222 223 ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask); 224 device_printf(dev, "Forcing PI to %d ports (mask = %x)\n", 225 nports, nmask); 226 } 227 228 ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); 229 230 /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ 231 if ((ctlr->quirks & AHCI_Q_ALTSIG) && 232 (ctlr->caps & AHCI_CAP_SPM) == 0) 233 ctlr->quirks |= AHCI_Q_NOBSYRES; 234 235 if (ctlr->quirks & AHCI_Q_1CH) { 236 ctlr->caps &= ~AHCI_CAP_NPMASK; 237 ctlr->ichannels &= 0x01; 238 } 239 if (ctlr->quirks & AHCI_Q_2CH) { 240 ctlr->caps &= ~AHCI_CAP_NPMASK; 241 ctlr->caps |= 1; 242 ctlr->ichannels &= 0x03; 243 } 244 if (ctlr->quirks & AHCI_Q_4CH) { 245 ctlr->caps &= ~AHCI_CAP_NPMASK; 246 ctlr->caps |= 3; 247 ctlr->ichannels &= 0x0f; 248 } 249 ctlr->channels = MAX(flsl(ctlr->ichannels), 250 (ctlr->caps & AHCI_CAP_NPMASK) + 1); 251 if (ctlr->quirks & AHCI_Q_NOPMP) 252 ctlr->caps &= ~AHCI_CAP_SPM; 253 if (ctlr->quirks & AHCI_Q_NONCQ) 254 ctlr->caps &= ~AHCI_CAP_SNCQ; 255 if ((ctlr->caps & AHCI_CAP_CCCS) == 0) 256 ctlr->ccc = 0; 257 ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); 258 259 /* Create controller-wide DMA tag. */ 260 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 261 (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR : 262 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 263 BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 264 ctlr->dma_coherent ? BUS_DMA_COHERENT : 0, NULL, NULL, 265 &ctlr->dma_tag)) { 266 ahci_free_mem(dev); 267 rman_fini(&ctlr->sc_iomem); 268 return (ENXIO); 269 } 270 271 ahci_ctlr_setup(dev); 272 273 /* Setup interrupts. */ 274 if ((error = ahci_setup_interrupt(dev)) != 0) { 275 bus_dma_tag_destroy(ctlr->dma_tag); 276 ahci_free_mem(dev); 277 rman_fini(&ctlr->sc_iomem); 278 return (error); 279 } 280 281 i = 0; 282 for (u = ctlr->ichannels; u != 0; u >>= 1) 283 i += (u & 1); 284 ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); 285 resource_int_value(device_get_name(dev), device_get_unit(dev), 286 "direct", &ctlr->direct); 287 /* Announce HW capabilities. */ 288 speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; 289 device_printf(dev, 290 "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", 291 ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), 292 ((version >> 4) & 0xf0) + (version & 0x0f), 293 (ctlr->caps & AHCI_CAP_NPMASK) + 1, 294 ((speed == 1) ? 
"1.5":((speed == 2) ? "3": 295 ((speed == 3) ? "6":"?"))), 296 (ctlr->caps & AHCI_CAP_SPM) ? 297 "supported" : "not supported", 298 (ctlr->caps & AHCI_CAP_FBSS) ? 299 " with FBS" : ""); 300 if (ctlr->quirks != 0) { 301 device_printf(dev, "quirks=0x%b\n", ctlr->quirks, 302 AHCI_Q_BIT_STRING); 303 } 304 if (bootverbose) { 305 device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", 306 (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", 307 (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", 308 (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", 309 (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", 310 (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", 311 (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", 312 (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", 313 (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", 314 ((speed == 1) ? "1.5":((speed == 2) ? "3": 315 ((speed == 3) ? "6":"?")))); 316 printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", 317 (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", 318 (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", 319 (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", 320 (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", 321 (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", 322 (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", 323 ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, 324 (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", 325 (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", 326 (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"", 327 (ctlr->caps & AHCI_CAP_NPMASK) + 1); 328 } 329 if (bootverbose && version >= 0x00010200) { 330 device_printf(dev, "Caps2:%s%s%s%s%s%s\n", 331 (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"", 332 (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"", 333 (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"", 334 (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", 335 (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", 336 (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":""); 337 } 338 /* Attach all channels on this controller */ 339 for (unit = 0; unit < ctlr->channels; unit++) { 340 child = device_add_child(dev, "ahcich", -1); 341 if (child == NULL) { 342 device_printf(dev, "failed to add channel device\n"); 343 continue; 344 } 345 device_set_ivars(child, (void *)(intptr_t)unit); 346 if ((ctlr->ichannels & (1 << unit)) == 0) 347 device_disable(child); 348 } 349 if (ctlr->caps & AHCI_CAP_EMS) { 350 child = device_add_child(dev, "ahciem", -1); 351 if (child == NULL) 352 device_printf(dev, "failed to add enclosure device\n"); 353 else 354 device_set_ivars(child, (void *)(intptr_t)-1); 355 } 356 bus_generic_attach(dev); 357 return (0); 358 } 359 360 int 361 ahci_detach(device_t dev) 362 { 363 struct ahci_controller *ctlr = device_get_softc(dev); 364 int i; 365 366 /* Detach & delete all children */ 367 device_delete_children(dev); 368 369 /* Free interrupts. */ 370 for (i = 0; i < ctlr->numirqs; i++) { 371 if (ctlr->irqs[i].r_irq) { 372 bus_teardown_intr(dev, ctlr->irqs[i].r_irq, 373 ctlr->irqs[i].handle); 374 bus_release_resource(dev, SYS_RES_IRQ, 375 ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); 376 } 377 } 378 bus_dma_tag_destroy(ctlr->dma_tag); 379 /* Free memory. 
*/ 380 rman_fini(&ctlr->sc_iomem); 381 ahci_free_mem(dev); 382 return (0); 383 } 384 385 void 386 ahci_free_mem(device_t dev) 387 { 388 struct ahci_controller *ctlr = device_get_softc(dev); 389 390 /* Release memory resources */ 391 if (ctlr->r_mem) 392 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); 393 if (ctlr->r_msix_table) 394 bus_release_resource(dev, SYS_RES_MEMORY, 395 ctlr->r_msix_tab_rid, ctlr->r_msix_table); 396 if (ctlr->r_msix_pba) 397 bus_release_resource(dev, SYS_RES_MEMORY, 398 ctlr->r_msix_pba_rid, ctlr->r_msix_pba); 399 400 ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL; 401 } 402 403 int 404 ahci_setup_interrupt(device_t dev) 405 { 406 struct ahci_controller *ctlr = device_get_softc(dev); 407 int i; 408 409 /* Check for single MSI vector fallback. */ 410 if (ctlr->numirqs > 1 && 411 (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { 412 device_printf(dev, "Falling back to one MSI\n"); 413 ctlr->numirqs = 1; 414 } 415 416 /* Ensure we don't overrun irqs. */ 417 if (ctlr->numirqs > AHCI_MAX_IRQS) { 418 device_printf(dev, "Too many irqs %d > %d (clamping)\n", 419 ctlr->numirqs, AHCI_MAX_IRQS); 420 ctlr->numirqs = AHCI_MAX_IRQS; 421 } 422 423 /* Allocate all IRQs. */ 424 for (i = 0; i < ctlr->numirqs; i++) { 425 ctlr->irqs[i].ctlr = ctlr; 426 ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0); 427 if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi) 428 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; 429 else if (ctlr->numirqs == 1 || i >= ctlr->channels || 430 (ctlr->ccc && i == ctlr->cccv)) 431 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; 432 else if (ctlr->channels > ctlr->numirqs && 433 i == ctlr->numirqs - 1) 434 ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; 435 else 436 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; 437 if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 438 &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { 439 device_printf(dev, "unable to map interrupt\n"); 440 return (ENXIO); 441 } 442 if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, 443 (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : 444 ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge : 445 ahci_intr_one), 446 &ctlr->irqs[i], &ctlr->irqs[i].handle))) { 447 /* SOS XXX release r_irq */ 448 device_printf(dev, "unable to setup interrupt\n"); 449 return (ENXIO); 450 } 451 if (ctlr->numirqs > 1) { 452 bus_describe_intr(dev, ctlr->irqs[i].r_irq, 453 ctlr->irqs[i].handle, 454 ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? 455 "ch%d" : "%d", i); 456 } 457 } 458 return (0); 459 } 460 461 /* 462 * Common case interrupt handler. 463 */ 464 static void 465 ahci_intr(void *data) 466 { 467 struct ahci_controller_irq *irq = data; 468 struct ahci_controller *ctlr = irq->ctlr; 469 u_int32_t is, ise = 0; 470 void *arg; 471 int unit; 472 473 if (irq->mode == AHCI_IRQ_MODE_ALL) { 474 unit = 0; 475 if (ctlr->ccc) 476 is = ctlr->ichannels; 477 else 478 is = ATA_INL(ctlr->r_mem, AHCI_IS); 479 } else { /* AHCI_IRQ_MODE_AFTER */ 480 unit = irq->r_irq_rid - 1; 481 is = ATA_INL(ctlr->r_mem, AHCI_IS); 482 is &= (0xffffffff << unit); 483 } 484 /* CCC interrupt is edge triggered. */ 485 if (ctlr->ccc) 486 ise = 1 << ctlr->cccv; 487 /* Some controllers have edge triggered IS. 
*/ 488 if (ctlr->quirks & AHCI_Q_EDGEIS) 489 ise |= is; 490 if (ise != 0) 491 ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); 492 for (; unit < ctlr->channels; unit++) { 493 if ((is & (1 << unit)) != 0 && 494 (arg = ctlr->interrupt[unit].argument)) { 495 ctlr->interrupt[unit].function(arg); 496 } 497 } 498 /* AHCI declares level triggered IS. */ 499 if (!(ctlr->quirks & AHCI_Q_EDGEIS)) 500 ATA_OUTL(ctlr->r_mem, AHCI_IS, is); 501 ATA_RBL(ctlr->r_mem, AHCI_IS); 502 } 503 504 /* 505 * Simplified interrupt handler for multivector MSI mode. 506 */ 507 static void 508 ahci_intr_one(void *data) 509 { 510 struct ahci_controller_irq *irq = data; 511 struct ahci_controller *ctlr = irq->ctlr; 512 void *arg; 513 int unit; 514 515 unit = irq->r_irq_rid - 1; 516 if ((arg = ctlr->interrupt[unit].argument)) 517 ctlr->interrupt[unit].function(arg); 518 /* AHCI declares level triggered IS. */ 519 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); 520 ATA_RBL(ctlr->r_mem, AHCI_IS); 521 } 522 523 static void 524 ahci_intr_one_edge(void *data) 525 { 526 struct ahci_controller_irq *irq = data; 527 struct ahci_controller *ctlr = irq->ctlr; 528 void *arg; 529 int unit; 530 531 unit = irq->r_irq_rid - 1; 532 /* Some controllers have edge triggered IS. */ 533 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); 534 if ((arg = ctlr->interrupt[unit].argument)) 535 ctlr->interrupt[unit].function(arg); 536 ATA_RBL(ctlr->r_mem, AHCI_IS); 537 } 538 539 struct resource * 540 ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, 541 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 542 { 543 struct ahci_controller *ctlr = device_get_softc(dev); 544 struct resource *res; 545 rman_res_t st; 546 int offset, size, unit; 547 548 unit = (intptr_t)device_get_ivars(child); 549 res = NULL; 550 switch (type) { 551 case SYS_RES_MEMORY: 552 if (unit >= 0) { 553 offset = AHCI_OFFSET + (unit << 7); 554 size = 128; 555 } else if (*rid == 0) { 556 offset = AHCI_EM_CTL; 557 size = 4; 558 } else { 559 offset = (ctlr->emloc & 0xffff0000) >> 14; 560 size = (ctlr->emloc & 0x0000ffff) << 2; 561 if (*rid != 1) { 562 if (*rid == 2 && (ctlr->capsem & 563 (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) 564 offset += size; 565 else 566 break; 567 } 568 } 569 st = rman_get_start(ctlr->r_mem); 570 res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, 571 st + offset + size - 1, size, RF_ACTIVE, child); 572 if (res) { 573 bus_space_handle_t bsh; 574 bus_space_tag_t bst; 575 bsh = rman_get_bushandle(ctlr->r_mem); 576 bst = rman_get_bustag(ctlr->r_mem); 577 bus_space_subregion(bst, bsh, offset, 128, &bsh); 578 rman_set_bushandle(res, bsh); 579 rman_set_bustag(res, bst); 580 } 581 break; 582 case SYS_RES_IRQ: 583 if (*rid == ATA_IRQ_RID) 584 res = ctlr->irqs[0].r_irq; 585 break; 586 } 587 return (res); 588 } 589 590 int 591 ahci_release_resource(device_t dev, device_t child, int type, int rid, 592 struct resource *r) 593 { 594 595 switch (type) { 596 case SYS_RES_MEMORY: 597 rman_release_resource(r); 598 return (0); 599 case SYS_RES_IRQ: 600 if (rid != ATA_IRQ_RID) 601 return (ENOENT); 602 return (0); 603 } 604 return (EINVAL); 605 } 606 607 int 608 ahci_setup_intr(device_t dev, device_t child, struct resource *irq, 609 int flags, driver_filter_t *filter, driver_intr_t *function, 610 void *argument, void **cookiep) 611 { 612 struct ahci_controller *ctlr = device_get_softc(dev); 613 int unit = (intptr_t)device_get_ivars(child); 614 615 if (filter != NULL) { 616 printf("ahci.c: we cannot use a filter here\n"); 617 return (EINVAL); 618 } 619 
ctlr->interrupt[unit].function = function; 620 ctlr->interrupt[unit].argument = argument; 621 return (0); 622 } 623 624 int 625 ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, 626 void *cookie) 627 { 628 struct ahci_controller *ctlr = device_get_softc(dev); 629 int unit = (intptr_t)device_get_ivars(child); 630 631 ctlr->interrupt[unit].function = NULL; 632 ctlr->interrupt[unit].argument = NULL; 633 return (0); 634 } 635 636 int 637 ahci_print_child(device_t dev, device_t child) 638 { 639 int retval, channel; 640 641 retval = bus_print_child_header(dev, child); 642 channel = (int)(intptr_t)device_get_ivars(child); 643 if (channel >= 0) 644 retval += printf(" at channel %d", channel); 645 retval += bus_print_child_footer(dev, child); 646 return (retval); 647 } 648 649 int 650 ahci_child_location_str(device_t dev, device_t child, char *buf, 651 size_t buflen) 652 { 653 int channel; 654 655 channel = (int)(intptr_t)device_get_ivars(child); 656 if (channel >= 0) 657 snprintf(buf, buflen, "channel=%d", channel); 658 return (0); 659 } 660 661 bus_dma_tag_t 662 ahci_get_dma_tag(device_t dev, device_t child) 663 { 664 struct ahci_controller *ctlr = device_get_softc(dev); 665 666 return (ctlr->dma_tag); 667 } 668 669 static int 670 ahci_ch_probe(device_t dev) 671 { 672 673 device_set_desc_copy(dev, "AHCI channel"); 674 return (BUS_PROBE_DEFAULT); 675 } 676 677 static int 678 ahci_ch_disablephy_proc(SYSCTL_HANDLER_ARGS) 679 { 680 struct ahci_channel *ch; 681 int error, value; 682 683 ch = arg1; 684 value = ch->disablephy; 685 error = sysctl_handle_int(oidp, &value, 0, req); 686 if (error != 0 || req->newptr == NULL || (value != 0 && value != 1)) 687 return (error); 688 689 mtx_lock(&ch->mtx); 690 ch->disablephy = value; 691 if (value) { 692 ahci_ch_deinit(ch->dev); 693 } else { 694 ahci_ch_init(ch->dev); 695 ahci_phy_check_events(ch, ATA_SE_PHY_CHANGED | ATA_SE_EXCHANGED); 696 } 697 mtx_unlock(&ch->mtx); 698 699 return (0); 700 } 701 702 static int 703 ahci_ch_attach(device_t dev) 704 { 705 struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); 706 struct ahci_channel *ch = device_get_softc(dev); 707 struct cam_devq *devq; 708 struct sysctl_ctx_list *ctx; 709 struct sysctl_oid *tree; 710 int rid, error, i, sata_rev = 0; 711 u_int32_t version; 712 713 ch->dev = dev; 714 ch->unit = (intptr_t)device_get_ivars(dev); 715 ch->caps = ctlr->caps; 716 ch->caps2 = ctlr->caps2; 717 ch->start = ctlr->ch_start; 718 ch->quirks = ctlr->quirks; 719 ch->vendorid = ctlr->vendorid; 720 ch->deviceid = ctlr->deviceid; 721 ch->subvendorid = ctlr->subvendorid; 722 ch->subdeviceid = ctlr->subdeviceid; 723 ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; 724 mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); 725 ch->pm_level = 0; 726 resource_int_value(device_get_name(dev), 727 device_get_unit(dev), "pm_level", &ch->pm_level); 728 STAILQ_INIT(&ch->doneq); 729 if (ch->pm_level > 3) 730 callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); 731 callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); 732 /* JMicron external ports (0) sometimes limited */ 733 if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0) 734 sata_rev = 1; 735 if (ch->quirks & AHCI_Q_SATA2) 736 sata_rev = 2; 737 resource_int_value(device_get_name(dev), 738 device_get_unit(dev), "sata_rev", &sata_rev); 739 for (i = 0; i < 16; i++) { 740 ch->user[i].revision = sata_rev; 741 ch->user[i].mode = 0; 742 ch->user[i].bytecount = 8192; 743 ch->user[i].tags = ch->numslots; 744 ch->user[i].caps = 0; 745 
ch->curr[i] = ch->user[i]; 746 if (ch->pm_level) { 747 ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | 748 CTS_SATA_CAPS_H_APST | 749 CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; 750 } 751 ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | 752 CTS_SATA_CAPS_H_AN; 753 } 754 rid = 0; 755 if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 756 &rid, RF_ACTIVE))) 757 return (ENXIO); 758 ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); 759 version = ATA_INL(ctlr->r_mem, AHCI_VS); 760 if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) 761 ch->chcaps |= AHCI_P_CMD_FBSCP; 762 if (ch->caps2 & AHCI_CAP2_SDS) 763 ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP); 764 if (bootverbose) { 765 device_printf(dev, "Caps:%s%s%s%s%s%s\n", 766 (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"", 767 (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"", 768 (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", 769 (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", 770 (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"", 771 (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":""); 772 } 773 ahci_dmainit(dev); 774 ahci_slotsalloc(dev); 775 mtx_lock(&ch->mtx); 776 ahci_ch_init(dev); 777 rid = ATA_IRQ_RID; 778 if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 779 &rid, RF_SHAREABLE | RF_ACTIVE))) { 780 device_printf(dev, "Unable to map interrupt\n"); 781 error = ENXIO; 782 goto err0; 783 } 784 if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, 785 ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr, 786 ch, &ch->ih))) { 787 device_printf(dev, "Unable to setup interrupt\n"); 788 error = ENXIO; 789 goto err1; 790 } 791 /* Create the device queue for our SIM. */ 792 devq = cam_simq_alloc(ch->numslots); 793 if (devq == NULL) { 794 device_printf(dev, "Unable to allocate simq\n"); 795 error = ENOMEM; 796 goto err1; 797 } 798 /* Construct SIM entry */ 799 ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, 800 device_get_unit(dev), (struct mtx *)&ch->mtx, 801 (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots), 802 (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0, 803 devq); 804 if (ch->sim == NULL) { 805 cam_simq_free(devq); 806 device_printf(dev, "unable to allocate sim\n"); 807 error = ENOMEM; 808 goto err1; 809 } 810 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { 811 device_printf(dev, "unable to register xpt bus\n"); 812 error = ENXIO; 813 goto err2; 814 } 815 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), 816 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 817 device_printf(dev, "unable to create path\n"); 818 error = ENXIO; 819 goto err3; 820 } 821 if (ch->pm_level > 3) { 822 callout_reset(&ch->pm_timer, 823 (ch->pm_level == 4) ? 
hz / 1000 : hz / 8, 824 ahci_ch_pm, ch); 825 } 826 mtx_unlock(&ch->mtx); 827 ctx = device_get_sysctl_ctx(dev); 828 tree = device_get_sysctl_tree(dev); 829 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "disable_phy", 830 CTLFLAG_RW | CTLTYPE_UINT, ch, 0, ahci_ch_disablephy_proc, "IU", 831 "Disable PHY"); 832 return (0); 833 834 err3: 835 xpt_bus_deregister(cam_sim_path(ch->sim)); 836 err2: 837 cam_sim_free(ch->sim, /*free_devq*/TRUE); 838 err1: 839 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 840 err0: 841 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); 842 mtx_unlock(&ch->mtx); 843 mtx_destroy(&ch->mtx); 844 return (error); 845 } 846 847 static int 848 ahci_ch_detach(device_t dev) 849 { 850 struct ahci_channel *ch = device_get_softc(dev); 851 852 mtx_lock(&ch->mtx); 853 xpt_async(AC_LOST_DEVICE, ch->path, NULL); 854 /* Forget about reset. */ 855 if (ch->resetting) { 856 ch->resetting = 0; 857 xpt_release_simq(ch->sim, TRUE); 858 } 859 xpt_free_path(ch->path); 860 xpt_bus_deregister(cam_sim_path(ch->sim)); 861 cam_sim_free(ch->sim, /*free_devq*/TRUE); 862 mtx_unlock(&ch->mtx); 863 864 if (ch->pm_level > 3) 865 callout_drain(&ch->pm_timer); 866 callout_drain(&ch->reset_timer); 867 bus_teardown_intr(dev, ch->r_irq, ch->ih); 868 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 869 870 ahci_ch_deinit(dev); 871 ahci_slotsfree(dev); 872 ahci_dmafini(dev); 873 874 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); 875 mtx_destroy(&ch->mtx); 876 return (0); 877 } 878 879 static int 880 ahci_ch_init(device_t dev) 881 { 882 struct ahci_channel *ch = device_get_softc(dev); 883 uint64_t work; 884 885 /* Disable port interrupts */ 886 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 887 /* Setup work areas */ 888 work = ch->dma.work_bus + AHCI_CL_OFFSET; 889 ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); 890 ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); 891 work = ch->dma.rfis_bus; 892 ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); 893 ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); 894 /* Activate the channel and power/spin up device */ 895 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 896 (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | 897 ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | 898 ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); 899 ahci_start_fr(ch); 900 ahci_start(ch, 1); 901 return (0); 902 } 903 904 static int 905 ahci_ch_deinit(device_t dev) 906 { 907 struct ahci_channel *ch = device_get_softc(dev); 908 909 /* Disable port interrupts. */ 910 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 911 /* Reset command register. */ 912 ahci_stop(ch); 913 ahci_stop_fr(ch); 914 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); 915 /* Allow everything, including partial and slumber modes. */ 916 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); 917 /* Request slumber mode transition and give some time to get there. */ 918 ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); 919 DELAY(100); 920 /* Disable PHY. */ 921 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); 922 return (0); 923 } 924 925 static int 926 ahci_ch_suspend(device_t dev) 927 { 928 struct ahci_channel *ch = device_get_softc(dev); 929 930 mtx_lock(&ch->mtx); 931 xpt_freeze_simq(ch->sim, 1); 932 /* Forget about reset. 
*/ 933 if (ch->resetting) { 934 ch->resetting = 0; 935 callout_stop(&ch->reset_timer); 936 xpt_release_simq(ch->sim, TRUE); 937 } 938 while (ch->oslots) 939 msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); 940 ahci_ch_deinit(dev); 941 mtx_unlock(&ch->mtx); 942 return (0); 943 } 944 945 static int 946 ahci_ch_resume(device_t dev) 947 { 948 struct ahci_channel *ch = device_get_softc(dev); 949 950 mtx_lock(&ch->mtx); 951 ahci_ch_init(dev); 952 ahci_reset(ch); 953 xpt_release_simq(ch->sim, TRUE); 954 mtx_unlock(&ch->mtx); 955 return (0); 956 } 957 958 devclass_t ahcich_devclass; 959 static device_method_t ahcich_methods[] = { 960 DEVMETHOD(device_probe, ahci_ch_probe), 961 DEVMETHOD(device_attach, ahci_ch_attach), 962 DEVMETHOD(device_detach, ahci_ch_detach), 963 DEVMETHOD(device_suspend, ahci_ch_suspend), 964 DEVMETHOD(device_resume, ahci_ch_resume), 965 DEVMETHOD_END 966 }; 967 static driver_t ahcich_driver = { 968 "ahcich", 969 ahcich_methods, 970 sizeof(struct ahci_channel) 971 }; 972 DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL); 973 974 struct ahci_dc_cb_args { 975 bus_addr_t maddr; 976 int error; 977 }; 978 979 static void 980 ahci_dmainit(device_t dev) 981 { 982 struct ahci_channel *ch = device_get_softc(dev); 983 struct ahci_dc_cb_args dcba; 984 size_t rfsize; 985 986 /* Command area. */ 987 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, 988 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 989 NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 990 0, NULL, NULL, &ch->dma.work_tag)) 991 goto error; 992 if (bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 993 BUS_DMA_ZERO, &ch->dma.work_map)) 994 goto error; 995 if (bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, 996 AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { 997 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); 998 goto error; 999 } 1000 ch->dma.work_bus = dcba.maddr; 1001 /* FIS receive area. */ 1002 if (ch->chcaps & AHCI_P_CMD_FBSCP) 1003 rfsize = 4096; 1004 else 1005 rfsize = 256; 1006 if (bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, 1007 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1008 NULL, NULL, rfsize, 1, rfsize, 1009 0, NULL, NULL, &ch->dma.rfis_tag)) 1010 goto error; 1011 if (bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, 1012 &ch->dma.rfis_map)) 1013 goto error; 1014 if (bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, 1015 rfsize, ahci_dmasetupc_cb, &dcba, 0) || dcba.error) { 1016 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); 1017 goto error; 1018 } 1019 ch->dma.rfis_bus = dcba.maddr; 1020 /* Data area. 
*/ 1021 if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, 1022 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1023 NULL, NULL, 1024 AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots, 1025 AHCI_SG_ENTRIES, AHCI_PRD_MAX, 1026 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) { 1027 goto error; 1028 } 1029 return; 1030 1031 error: 1032 device_printf(dev, "WARNING - DMA initialization failed\n"); 1033 ahci_dmafini(dev); 1034 } 1035 1036 static void 1037 ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 1038 { 1039 struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; 1040 1041 if (!(dcba->error = error)) 1042 dcba->maddr = segs[0].ds_addr; 1043 } 1044 1045 static void 1046 ahci_dmafini(device_t dev) 1047 { 1048 struct ahci_channel *ch = device_get_softc(dev); 1049 1050 if (ch->dma.data_tag) { 1051 bus_dma_tag_destroy(ch->dma.data_tag); 1052 ch->dma.data_tag = NULL; 1053 } 1054 if (ch->dma.rfis_bus) { 1055 bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); 1056 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); 1057 ch->dma.rfis_bus = 0; 1058 ch->dma.rfis = NULL; 1059 } 1060 if (ch->dma.work_bus) { 1061 bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); 1062 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); 1063 ch->dma.work_bus = 0; 1064 ch->dma.work = NULL; 1065 } 1066 if (ch->dma.work_tag) { 1067 bus_dma_tag_destroy(ch->dma.work_tag); 1068 ch->dma.work_tag = NULL; 1069 } 1070 } 1071 1072 static void 1073 ahci_slotsalloc(device_t dev) 1074 { 1075 struct ahci_channel *ch = device_get_softc(dev); 1076 int i; 1077 1078 /* Alloc and setup command/dma slots */ 1079 bzero(ch->slot, sizeof(ch->slot)); 1080 for (i = 0; i < ch->numslots; i++) { 1081 struct ahci_slot *slot = &ch->slot[i]; 1082 1083 slot->ch = ch; 1084 slot->slot = i; 1085 slot->state = AHCI_SLOT_EMPTY; 1086 slot->ccb = NULL; 1087 callout_init_mtx(&slot->timeout, &ch->mtx, 0); 1088 1089 if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) 1090 device_printf(ch->dev, "FAILURE - create data_map\n"); 1091 } 1092 } 1093 1094 static void 1095 ahci_slotsfree(device_t dev) 1096 { 1097 struct ahci_channel *ch = device_get_softc(dev); 1098 int i; 1099 1100 /* Free all dma slots */ 1101 for (i = 0; i < ch->numslots; i++) { 1102 struct ahci_slot *slot = &ch->slot[i]; 1103 1104 callout_drain(&slot->timeout); 1105 if (slot->dma.data_map) { 1106 bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); 1107 slot->dma.data_map = NULL; 1108 } 1109 } 1110 } 1111 1112 static int 1113 ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr) 1114 { 1115 1116 if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || 1117 ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { 1118 u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); 1119 union ccb *ccb; 1120 1121 if (bootverbose) { 1122 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) 1123 device_printf(ch->dev, "CONNECT requested\n"); 1124 else 1125 device_printf(ch->dev, "DISCONNECT requested\n"); 1126 } 1127 ahci_reset(ch); 1128 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 1129 return (0); 1130 if (xpt_create_path(&ccb->ccb_h.path, NULL, 1131 cam_sim_path(ch->sim), 1132 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1133 xpt_free_ccb(ccb); 1134 return (0); 1135 } 1136 xpt_rescan(ccb); 1137 return (1); 1138 } 1139 return (0); 1140 } 1141 1142 static void 1143 ahci_cpd_check_events(struct ahci_channel *ch) 1144 { 1145 u_int32_t status; 1146 union ccb *ccb; 1147 device_t dev; 1148 1149 if 
(ch->pm_level == 0) 1150 return; 1151 1152 status = ATA_INL(ch->r_mem, AHCI_P_CMD); 1153 if ((status & AHCI_P_CMD_CPD) == 0) 1154 return; 1155 1156 if (bootverbose) { 1157 dev = ch->dev; 1158 if (status & AHCI_P_CMD_CPS) { 1159 device_printf(dev, "COLD CONNECT requested\n"); 1160 } else 1161 device_printf(dev, "COLD DISCONNECT requested\n"); 1162 } 1163 ahci_reset(ch); 1164 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 1165 return; 1166 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), 1167 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1168 xpt_free_ccb(ccb); 1169 return; 1170 } 1171 xpt_rescan(ccb); 1172 } 1173 1174 static void 1175 ahci_notify_events(struct ahci_channel *ch, u_int32_t status) 1176 { 1177 struct cam_path *dpath; 1178 int i; 1179 1180 if (ch->caps & AHCI_CAP_SSNTF) 1181 ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); 1182 if (bootverbose) 1183 device_printf(ch->dev, "SNTF 0x%04x\n", status); 1184 for (i = 0; i < 16; i++) { 1185 if ((status & (1 << i)) == 0) 1186 continue; 1187 if (xpt_create_path(&dpath, NULL, 1188 xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { 1189 xpt_async(AC_SCSI_AEN, dpath, NULL); 1190 xpt_free_path(dpath); 1191 } 1192 } 1193 } 1194 1195 static void 1196 ahci_done(struct ahci_channel *ch, union ccb *ccb) 1197 { 1198 1199 mtx_assert(&ch->mtx, MA_OWNED); 1200 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || 1201 ch->batch == 0) { 1202 xpt_done(ccb); 1203 return; 1204 } 1205 1206 STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); 1207 } 1208 1209 static void 1210 ahci_ch_intr(void *arg) 1211 { 1212 struct ahci_channel *ch = (struct ahci_channel *)arg; 1213 uint32_t istatus; 1214 1215 /* Read interrupt statuses. */ 1216 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 1217 1218 mtx_lock(&ch->mtx); 1219 ahci_ch_intr_main(ch, istatus); 1220 mtx_unlock(&ch->mtx); 1221 } 1222 1223 static void 1224 ahci_ch_intr_direct(void *arg) 1225 { 1226 struct ahci_channel *ch = (struct ahci_channel *)arg; 1227 struct ccb_hdr *ccb_h; 1228 uint32_t istatus; 1229 STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq); 1230 1231 /* Read interrupt statuses. */ 1232 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 1233 1234 mtx_lock(&ch->mtx); 1235 ch->batch = 1; 1236 ahci_ch_intr_main(ch, istatus); 1237 ch->batch = 0; 1238 /* 1239 * Prevent the possibility of issues caused by processing the queue 1240 * while unlocked below by moving the contents to a local queue. 1241 */ 1242 STAILQ_CONCAT(&tmp_doneq, &ch->doneq); 1243 mtx_unlock(&ch->mtx); 1244 while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) { 1245 STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe); 1246 xpt_done_direct((union ccb *)ccb_h); 1247 } 1248 } 1249 1250 static void 1251 ahci_ch_pm(void *arg) 1252 { 1253 struct ahci_channel *ch = (struct ahci_channel *)arg; 1254 uint32_t work; 1255 1256 if (ch->numrslots != 0) 1257 return; 1258 work = ATA_INL(ch->r_mem, AHCI_P_CMD); 1259 if (ch->pm_level == 4) 1260 work |= AHCI_P_CMD_PARTIAL; 1261 else 1262 work |= AHCI_P_CMD_SLUMBER; 1263 ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); 1264 } 1265 1266 static void 1267 ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) 1268 { 1269 uint32_t cstatus, serr = 0, sntf = 0, ok, err; 1270 enum ahci_err_type et; 1271 int i, ccs, port, reset = 0; 1272 1273 /* Clear interrupt statuses. */ 1274 ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); 1275 /* Read command statuses. 
*/ 1276 if (ch->numtslots != 0) 1277 cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); 1278 else 1279 cstatus = 0; 1280 if (ch->numrslots != ch->numtslots) 1281 cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI); 1282 /* Read SNTF in one of possible ways. */ 1283 if ((istatus & AHCI_P_IX_SDB) && 1284 (ch->pm_present || ch->curr[0].atapi != 0)) { 1285 if (ch->caps & AHCI_CAP_SSNTF) 1286 sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF); 1287 else if (ch->fbs_enabled) { 1288 u_int8_t *fis = ch->dma.rfis + 0x58; 1289 1290 for (i = 0; i < 16; i++) { 1291 if (fis[1] & 0x80) { 1292 fis[1] &= 0x7f; 1293 sntf |= 1 << i; 1294 } 1295 fis += 256; 1296 } 1297 } else { 1298 u_int8_t *fis = ch->dma.rfis + 0x58; 1299 1300 if (fis[1] & 0x80) 1301 sntf = (1 << (fis[1] & 0x0f)); 1302 } 1303 } 1304 /* Process PHY events */ 1305 if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF | 1306 AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { 1307 serr = ATA_INL(ch->r_mem, AHCI_P_SERR); 1308 if (serr) { 1309 ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr); 1310 reset = ahci_phy_check_events(ch, serr); 1311 } 1312 } 1313 /* Process cold presence detection events */ 1314 if ((istatus & AHCI_P_IX_CPD) && !reset) 1315 ahci_cpd_check_events(ch); 1316 /* Process command errors */ 1317 if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF | 1318 AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { 1319 if (ch->quirks & AHCI_Q_NOCCS) { 1320 /* 1321 * ASMedia chips sometimes report failed commands as 1322 * completed. Count all running commands as failed. 1323 */ 1324 cstatus |= ch->rslots; 1325 1326 /* They also report wrong CCS, so try to guess one. */ 1327 ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1; 1328 } else { 1329 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & 1330 AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; 1331 } 1332 //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n", 1333 // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), 1334 // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs); 1335 port = -1; 1336 if (ch->fbs_enabled) { 1337 uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS); 1338 if (fbs & AHCI_P_FBS_SDE) { 1339 port = (fbs & AHCI_P_FBS_DWE) 1340 >> AHCI_P_FBS_DWE_SHIFT; 1341 } else { 1342 for (i = 0; i < 16; i++) { 1343 if (ch->numrslotspd[i] == 0) 1344 continue; 1345 if (port == -1) 1346 port = i; 1347 else if (port != i) { 1348 port = -2; 1349 break; 1350 } 1351 } 1352 } 1353 } 1354 err = ch->rslots & cstatus; 1355 } else { 1356 ccs = 0; 1357 err = 0; 1358 port = -1; 1359 } 1360 /* Complete all successful commands. */ 1361 ok = ch->rslots & ~cstatus; 1362 for (i = 0; i < ch->numslots; i++) { 1363 if ((ok >> i) & 1) 1364 ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE); 1365 } 1366 /* On error, complete the rest of commands with error statuses. */ 1367 if (err) { 1368 if (ch->frozen) { 1369 union ccb *fccb = ch->frozen; 1370 ch->frozen = NULL; 1371 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; 1372 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { 1373 xpt_freeze_devq(fccb->ccb_h.path, 1); 1374 fccb->ccb_h.status |= CAM_DEV_QFRZN; 1375 } 1376 ahci_done(ch, fccb); 1377 } 1378 for (i = 0; i < ch->numslots; i++) { 1379 /* XXX: reqests in loading state. 
*/ 1380 if (((err >> i) & 1) == 0) 1381 continue; 1382 if (port >= 0 && 1383 ch->slot[i].ccb->ccb_h.target_id != port) 1384 continue; 1385 if (istatus & AHCI_P_IX_TFE) { 1386 if (port != -2) { 1387 /* Task File Error */ 1388 if (ch->numtslotspd[ 1389 ch->slot[i].ccb->ccb_h.target_id] == 0) { 1390 /* Untagged operation. */ 1391 if (i == ccs) 1392 et = AHCI_ERR_TFE; 1393 else 1394 et = AHCI_ERR_INNOCENT; 1395 } else { 1396 /* Tagged operation. */ 1397 et = AHCI_ERR_NCQ; 1398 } 1399 } else { 1400 et = AHCI_ERR_TFE; 1401 ch->fatalerr = 1; 1402 } 1403 } else if (istatus & AHCI_P_IX_IF) { 1404 if (ch->numtslots == 0 && i != ccs && port != -2) 1405 et = AHCI_ERR_INNOCENT; 1406 else 1407 et = AHCI_ERR_SATA; 1408 } else 1409 et = AHCI_ERR_INVALID; 1410 ahci_end_transaction(&ch->slot[i], et); 1411 } 1412 /* 1413 * We can't reinit port if there are some other 1414 * commands active, use resume to complete them. 1415 */ 1416 if (ch->rslots != 0 && !ch->recoverycmd) 1417 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); 1418 } 1419 /* Process NOTIFY events */ 1420 if (sntf) 1421 ahci_notify_events(ch, sntf); 1422 } 1423 1424 /* Must be called with channel locked. */ 1425 static int 1426 ahci_check_collision(struct ahci_channel *ch, union ccb *ccb) 1427 { 1428 int t = ccb->ccb_h.target_id; 1429 1430 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1431 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1432 /* Tagged command while we have no supported tag free. */ 1433 if (((~ch->oslots) & (0xffffffff >> (32 - 1434 ch->curr[t].tags))) == 0) 1435 return (1); 1436 /* If we have FBS */ 1437 if (ch->fbs_enabled) { 1438 /* Tagged command while untagged are active. */ 1439 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) 1440 return (1); 1441 } else { 1442 /* Tagged command while untagged are active. */ 1443 if (ch->numrslots != 0 && ch->numtslots == 0) 1444 return (1); 1445 /* Tagged command while tagged to other target is active. */ 1446 if (ch->numtslots != 0 && 1447 ch->taggedtarget != ccb->ccb_h.target_id) 1448 return (1); 1449 } 1450 } else { 1451 /* If we have FBS */ 1452 if (ch->fbs_enabled) { 1453 /* Untagged command while tagged are active. */ 1454 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) 1455 return (1); 1456 } else { 1457 /* Untagged command while tagged are active. */ 1458 if (ch->numrslots != 0 && ch->numtslots != 0) 1459 return (1); 1460 } 1461 } 1462 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1463 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { 1464 /* Atomic command while anything active. */ 1465 if (ch->numrslots != 0) 1466 return (1); 1467 } 1468 /* We have some atomic command running. */ 1469 if (ch->aslots != 0) 1470 return (1); 1471 return (0); 1472 } 1473 1474 /* Must be called with channel locked. */ 1475 static void 1476 ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb) 1477 { 1478 struct ahci_slot *slot; 1479 int tag, tags; 1480 1481 /* Choose empty slot. */ 1482 tags = ch->numslots; 1483 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1484 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) 1485 tags = ch->curr[ccb->ccb_h.target_id].tags; 1486 if (ch->lastslot + 1 < tags) 1487 tag = ffs(~(ch->oslots >> (ch->lastslot + 1))); 1488 else 1489 tag = 0; 1490 if (tag == 0 || tag + ch->lastslot >= tags) 1491 tag = ffs(~ch->oslots) - 1; 1492 else 1493 tag += ch->lastslot; 1494 ch->lastslot = tag; 1495 /* Occupy chosen slot. */ 1496 slot = &ch->slot[tag]; 1497 slot->ccb = ccb; 1498 /* Stop PM timer. 
*/ 1499 if (ch->numrslots == 0 && ch->pm_level > 3) 1500 callout_stop(&ch->pm_timer); 1501 /* Update channel stats. */ 1502 ch->oslots |= (1 << tag); 1503 ch->numrslots++; 1504 ch->numrslotspd[ccb->ccb_h.target_id]++; 1505 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1506 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1507 ch->numtslots++; 1508 ch->numtslotspd[ccb->ccb_h.target_id]++; 1509 ch->taggedtarget = ccb->ccb_h.target_id; 1510 } 1511 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1512 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) 1513 ch->aslots |= (1 << tag); 1514 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1515 slot->state = AHCI_SLOT_LOADING; 1516 bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, 1517 ahci_dmasetprd, slot, 0); 1518 } else { 1519 slot->dma.nsegs = 0; 1520 ahci_execute_transaction(slot); 1521 } 1522 } 1523 1524 /* Locked by busdma engine. */ 1525 static void 1526 ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1527 { 1528 struct ahci_slot *slot = arg; 1529 struct ahci_channel *ch = slot->ch; 1530 struct ahci_cmd_tab *ctp; 1531 struct ahci_dma_prd *prd; 1532 int i; 1533 1534 if (error) { 1535 device_printf(ch->dev, "DMA load error\n"); 1536 ahci_end_transaction(slot, AHCI_ERR_INVALID); 1537 return; 1538 } 1539 KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); 1540 /* Get a piece of the workspace for this request */ 1541 ctp = (struct ahci_cmd_tab *) 1542 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); 1543 /* Fill S/G table */ 1544 prd = &ctp->prd_tab[0]; 1545 for (i = 0; i < nsegs; i++) { 1546 prd[i].dba = htole64(segs[i].ds_addr); 1547 prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); 1548 } 1549 slot->dma.nsegs = nsegs; 1550 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, 1551 ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? 1552 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); 1553 ahci_execute_transaction(slot); 1554 } 1555 1556 /* Must be called with channel locked. */ 1557 static void 1558 ahci_execute_transaction(struct ahci_slot *slot) 1559 { 1560 struct ahci_channel *ch = slot->ch; 1561 struct ahci_cmd_tab *ctp; 1562 struct ahci_cmd_list *clp; 1563 union ccb *ccb = slot->ccb; 1564 int port = ccb->ccb_h.target_id & 0x0f; 1565 int fis_size, i, softreset; 1566 uint8_t *fis = ch->dma.rfis + 0x40; 1567 uint8_t val; 1568 uint16_t cmd_flags; 1569 1570 /* Get a piece of the workspace for this request */ 1571 ctp = (struct ahci_cmd_tab *) 1572 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); 1573 /* Setup the FIS for this request */ 1574 if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) { 1575 device_printf(ch->dev, "Setting up SATA FIS failed\n"); 1576 ahci_end_transaction(slot, AHCI_ERR_INVALID); 1577 return; 1578 } 1579 /* Setup the command list entry */ 1580 clp = (struct ahci_cmd_list *) 1581 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); 1582 cmd_flags = 1583 (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | 1584 (ccb->ccb_h.func_code == XPT_SCSI_IO ? 1585 (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) | 1586 (fis_size / sizeof(u_int32_t)) | 1587 (port << 12); 1588 clp->prd_length = htole16(slot->dma.nsegs); 1589 /* Special handling for Soft Reset command. 
*/ 1590 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1591 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) { 1592 if (ccb->ataio.cmd.control & ATA_A_RESET) { 1593 softreset = 1; 1594 /* Kick controller into sane state */ 1595 ahci_stop(ch); 1596 ahci_clo(ch); 1597 ahci_start(ch, 0); 1598 cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY; 1599 } else { 1600 softreset = 2; 1601 /* Prepare FIS receive area for check. */ 1602 for (i = 0; i < 20; i++) 1603 fis[i] = 0xff; 1604 } 1605 } else 1606 softreset = 0; 1607 clp->bytecount = 0; 1608 clp->cmd_flags = htole16(cmd_flags); 1609 clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET + 1610 (AHCI_CT_SIZE * slot->slot)); 1611 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, 1612 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1613 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, 1614 BUS_DMASYNC_PREREAD); 1615 /* Set ACTIVE bit for NCQ commands. */ 1616 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1617 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1618 ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot); 1619 } 1620 /* If FBS is enabled, set PMP port. */ 1621 if (ch->fbs_enabled) { 1622 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | 1623 (port << AHCI_P_FBS_DEV_SHIFT)); 1624 } 1625 /* Issue command to the controller. */ 1626 slot->state = AHCI_SLOT_RUNNING; 1627 ch->rslots |= (1 << slot->slot); 1628 ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot)); 1629 /* Device reset commands doesn't interrupt. Poll them. */ 1630 if (ccb->ccb_h.func_code == XPT_ATA_IO && 1631 (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) { 1632 int count, timeout = ccb->ccb_h.timeout * 100; 1633 enum ahci_err_type et = AHCI_ERR_NONE; 1634 1635 for (count = 0; count < timeout; count++) { 1636 DELAY(10); 1637 if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot))) 1638 break; 1639 if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) && 1640 softreset != 1) { 1641 #if 0 1642 device_printf(ch->dev, 1643 "Poll error on slot %d, TFD: %04x\n", 1644 slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD)); 1645 #endif 1646 et = AHCI_ERR_TFE; 1647 break; 1648 } 1649 /* Workaround for ATI SB600/SB700 chipsets. */ 1650 if (ccb->ccb_h.target_id == 15 && 1651 (ch->quirks & AHCI_Q_ATI_PMP_BUG) && 1652 (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) { 1653 et = AHCI_ERR_TIMEOUT; 1654 break; 1655 } 1656 } 1657 1658 /* 1659 * Some Marvell controllers require additional time 1660 * after soft reset to work properly. Setup delay 1661 * to 50ms after soft reset. 1662 */ 1663 if (ch->quirks & AHCI_Q_MRVL_SR_DEL) 1664 DELAY(50000); 1665 1666 /* 1667 * Marvell HBAs with non-RAID firmware do not wait for 1668 * readiness after soft reset, so we have to wait here. 1669 * Marvell RAIDs do not have this problem, but instead 1670 * sometimes forget to update FIS receive area, breaking 1671 * this wait. 
1672 */ 1673 if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 && 1674 (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 && 1675 softreset == 2 && et == AHCI_ERR_NONE) { 1676 for ( ; count < timeout; count++) { 1677 bus_dmamap_sync(ch->dma.rfis_tag, 1678 ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); 1679 val = fis[2]; 1680 bus_dmamap_sync(ch->dma.rfis_tag, 1681 ch->dma.rfis_map, BUS_DMASYNC_PREREAD); 1682 if ((val & ATA_S_BUSY) == 0) 1683 break; 1684 DELAY(10); 1685 } 1686 } 1687 1688 if (timeout && (count >= timeout)) { 1689 device_printf(ch->dev, "Poll timeout on slot %d port %d\n", 1690 slot->slot, port); 1691 device_printf(ch->dev, "is %08x cs %08x ss %08x " 1692 "rs %08x tfd %02x serr %08x cmd %08x\n", 1693 ATA_INL(ch->r_mem, AHCI_P_IS), 1694 ATA_INL(ch->r_mem, AHCI_P_CI), 1695 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, 1696 ATA_INL(ch->r_mem, AHCI_P_TFD), 1697 ATA_INL(ch->r_mem, AHCI_P_SERR), 1698 ATA_INL(ch->r_mem, AHCI_P_CMD)); 1699 et = AHCI_ERR_TIMEOUT; 1700 } 1701 1702 /* Kick controller into sane state and enable FBS. */ 1703 if (softreset == 2) 1704 ch->eslots |= (1 << slot->slot); 1705 ahci_end_transaction(slot, et); 1706 return; 1707 } 1708 /* Start command execution timeout */ 1709 callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2, 1710 0, (timeout_t*)ahci_timeout, slot, 0); 1711 return; 1712 } 1713 1714 /* Must be called with channel locked. */ 1715 static void 1716 ahci_process_timeout(struct ahci_channel *ch) 1717 { 1718 int i; 1719 1720 mtx_assert(&ch->mtx, MA_OWNED); 1721 /* Handle the rest of commands. */ 1722 for (i = 0; i < ch->numslots; i++) { 1723 /* Do we have a running request on slot? */ 1724 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 1725 continue; 1726 ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); 1727 } 1728 } 1729 1730 /* Must be called with channel locked. */ 1731 static void 1732 ahci_rearm_timeout(struct ahci_channel *ch) 1733 { 1734 int i; 1735 1736 mtx_assert(&ch->mtx, MA_OWNED); 1737 for (i = 0; i < ch->numslots; i++) { 1738 struct ahci_slot *slot = &ch->slot[i]; 1739 1740 /* Do we have a running request on slot? */ 1741 if (slot->state < AHCI_SLOT_RUNNING) 1742 continue; 1743 if ((ch->toslots & (1 << i)) == 0) 1744 continue; 1745 callout_reset_sbt(&slot->timeout, 1746 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, 1747 (timeout_t*)ahci_timeout, slot, 0); 1748 } 1749 } 1750 1751 /* Locked by callout mechanism. */ 1752 static void 1753 ahci_timeout(struct ahci_slot *slot) 1754 { 1755 struct ahci_channel *ch = slot->ch; 1756 device_t dev = ch->dev; 1757 uint32_t sstatus; 1758 int ccs; 1759 int i; 1760 1761 /* Check for stale timeout. */ 1762 if (slot->state < AHCI_SLOT_RUNNING) 1763 return; 1764 1765 /* Check if slot was not being executed last time we checked. */ 1766 if (slot->state < AHCI_SLOT_EXECUTING) { 1767 /* Check if slot started executing. 
*/ 1768 sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); 1769 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) 1770 >> AHCI_P_CMD_CCS_SHIFT; 1771 if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot || 1772 ch->fbs_enabled || ch->wrongccs) 1773 slot->state = AHCI_SLOT_EXECUTING; 1774 else if ((ch->rslots & (1 << ccs)) == 0) { 1775 ch->wrongccs = 1; 1776 slot->state = AHCI_SLOT_EXECUTING; 1777 } 1778 1779 callout_reset_sbt(&slot->timeout, 1780 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, 1781 (timeout_t*)ahci_timeout, slot, 0); 1782 return; 1783 } 1784 1785 device_printf(dev, "Timeout on slot %d port %d\n", 1786 slot->slot, slot->ccb->ccb_h.target_id & 0x0f); 1787 device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x " 1788 "serr %08x cmd %08x\n", 1789 ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), 1790 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, 1791 ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), 1792 ATA_INL(ch->r_mem, AHCI_P_CMD)); 1793 1794 /* Handle frozen command. */ 1795 if (ch->frozen) { 1796 union ccb *fccb = ch->frozen; 1797 ch->frozen = NULL; 1798 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; 1799 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { 1800 xpt_freeze_devq(fccb->ccb_h.path, 1); 1801 fccb->ccb_h.status |= CAM_DEV_QFRZN; 1802 } 1803 ahci_done(ch, fccb); 1804 } 1805 if (!ch->fbs_enabled && !ch->wrongccs) { 1806 /* Without FBS we know real timeout source. */ 1807 ch->fatalerr = 1; 1808 /* Handle command with timeout. */ 1809 ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT); 1810 /* Handle the rest of commands. */ 1811 for (i = 0; i < ch->numslots; i++) { 1812 /* Do we have a running request on slot? */ 1813 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 1814 continue; 1815 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); 1816 } 1817 } else { 1818 /* With FBS we wait for other commands timeout and pray. */ 1819 if (ch->toslots == 0) 1820 xpt_freeze_simq(ch->sim, 1); 1821 ch->toslots |= (1 << slot->slot); 1822 if ((ch->rslots & ~ch->toslots) == 0) 1823 ahci_process_timeout(ch); 1824 else 1825 device_printf(dev, " ... waiting for slots %08x\n", 1826 ch->rslots & ~ch->toslots); 1827 } 1828 } 1829 1830 /* Must be called with channel locked. */ 1831 static void 1832 ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et) 1833 { 1834 struct ahci_channel *ch = slot->ch; 1835 union ccb *ccb = slot->ccb; 1836 struct ahci_cmd_list *clp; 1837 int lastto; 1838 uint32_t sig; 1839 1840 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, 1841 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1842 clp = (struct ahci_cmd_list *) 1843 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); 1844 /* Read result registers to the result struct 1845 * May be incorrect if several commands finished same time, 1846 * so read only when sure or have to. 
1847 */ 1848 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1849 struct ata_res *res = &ccb->ataio.res; 1850 1851 if ((et == AHCI_ERR_TFE) || 1852 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { 1853 u_int8_t *fis = ch->dma.rfis + 0x40; 1854 1855 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, 1856 BUS_DMASYNC_POSTREAD); 1857 if (ch->fbs_enabled) { 1858 fis += ccb->ccb_h.target_id * 256; 1859 res->status = fis[2]; 1860 res->error = fis[3]; 1861 } else { 1862 uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD); 1863 1864 res->status = tfd; 1865 res->error = tfd >> 8; 1866 } 1867 res->lba_low = fis[4]; 1868 res->lba_mid = fis[5]; 1869 res->lba_high = fis[6]; 1870 res->device = fis[7]; 1871 res->lba_low_exp = fis[8]; 1872 res->lba_mid_exp = fis[9]; 1873 res->lba_high_exp = fis[10]; 1874 res->sector_count = fis[12]; 1875 res->sector_count_exp = fis[13]; 1876 1877 /* 1878 * Some weird controllers do not return signature in 1879 * FIS receive area. Read it from PxSIG register. 1880 */ 1881 if ((ch->quirks & AHCI_Q_ALTSIG) && 1882 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1883 (ccb->ataio.cmd.control & ATA_A_RESET) == 0) { 1884 sig = ATA_INL(ch->r_mem, AHCI_P_SIG); 1885 res->lba_high = sig >> 24; 1886 res->lba_mid = sig >> 16; 1887 res->lba_low = sig >> 8; 1888 res->sector_count = sig; 1889 } 1890 } else 1891 bzero(res, sizeof(*res)); 1892 if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 && 1893 (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1894 (ch->quirks & AHCI_Q_NOCOUNT) == 0) { 1895 ccb->ataio.resid = 1896 ccb->ataio.dxfer_len - le32toh(clp->bytecount); 1897 } 1898 } else { 1899 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1900 (ch->quirks & AHCI_Q_NOCOUNT) == 0) { 1901 ccb->csio.resid = 1902 ccb->csio.dxfer_len - le32toh(clp->bytecount); 1903 } 1904 } 1905 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1906 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, 1907 (ccb->ccb_h.flags & CAM_DIR_IN) ? 1908 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1909 bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); 1910 } 1911 if (et != AHCI_ERR_NONE) 1912 ch->eslots |= (1 << slot->slot); 1913 /* In case of error, freeze device for proper recovery. */ 1914 if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) && 1915 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 1916 xpt_freeze_devq(ccb->ccb_h.path, 1); 1917 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1918 } 1919 /* Set proper result status. 
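 * Map the driver-internal error type to a CAM status, roughly:
 *   AHCI_ERR_NONE     -> CAM_REQ_CMP
 *   AHCI_ERR_INVALID  -> CAM_REQ_INVALID (fatal)
 *   AHCI_ERR_INNOCENT -> CAM_REQUEUE_REQ
 *   AHCI_ERR_TFE/NCQ  -> CAM_SCSI_STATUS_ERROR or CAM_ATA_STATUS_ERROR
 *   AHCI_ERR_SATA     -> CAM_UNCOR_PARITY (fatal)
 *   AHCI_ERR_TIMEOUT  -> CAM_CMD_TIMEOUT
 *   anything else     -> CAM_REQ_CMP_ERR (fatal)
 * SATA and timeout errors also freeze the SIM queue (released via
 * CAM_RELEASE_SIMQ) unless this was a recovery command.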
*/ 1920 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1921 switch (et) { 1922 case AHCI_ERR_NONE: 1923 ccb->ccb_h.status |= CAM_REQ_CMP; 1924 if (ccb->ccb_h.func_code == XPT_SCSI_IO) 1925 ccb->csio.scsi_status = SCSI_STATUS_OK; 1926 break; 1927 case AHCI_ERR_INVALID: 1928 ch->fatalerr = 1; 1929 ccb->ccb_h.status |= CAM_REQ_INVALID; 1930 break; 1931 case AHCI_ERR_INNOCENT: 1932 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1933 break; 1934 case AHCI_ERR_TFE: 1935 case AHCI_ERR_NCQ: 1936 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 1937 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1938 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1939 } else { 1940 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; 1941 } 1942 break; 1943 case AHCI_ERR_SATA: 1944 ch->fatalerr = 1; 1945 if (!ch->recoverycmd) { 1946 xpt_freeze_simq(ch->sim, 1); 1947 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1948 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1949 } 1950 ccb->ccb_h.status |= CAM_UNCOR_PARITY; 1951 break; 1952 case AHCI_ERR_TIMEOUT: 1953 if (!ch->recoverycmd) { 1954 xpt_freeze_simq(ch->sim, 1); 1955 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1956 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1957 } 1958 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 1959 break; 1960 default: 1961 ch->fatalerr = 1; 1962 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1963 } 1964 /* Free slot. */ 1965 ch->oslots &= ~(1 << slot->slot); 1966 ch->rslots &= ~(1 << slot->slot); 1967 ch->aslots &= ~(1 << slot->slot); 1968 slot->state = AHCI_SLOT_EMPTY; 1969 slot->ccb = NULL; 1970 /* Update channel stats. */ 1971 ch->numrslots--; 1972 ch->numrslotspd[ccb->ccb_h.target_id]--; 1973 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1974 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1975 ch->numtslots--; 1976 ch->numtslotspd[ccb->ccb_h.target_id]--; 1977 } 1978 /* Cancel timeout state if request completed normally. */ 1979 if (et != AHCI_ERR_TIMEOUT) { 1980 lastto = (ch->toslots == (1 << slot->slot)); 1981 ch->toslots &= ~(1 << slot->slot); 1982 if (lastto) 1983 xpt_release_simq(ch->sim, TRUE); 1984 } 1985 /* If it was first request of reset sequence and there is no error, 1986 * proceed to second request. */ 1987 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1988 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1989 (ccb->ataio.cmd.control & ATA_A_RESET) && 1990 et == AHCI_ERR_NONE) { 1991 ccb->ataio.cmd.control &= ~ATA_A_RESET; 1992 ahci_begin_transaction(ch, ccb); 1993 return; 1994 } 1995 /* If it was our READ LOG command - process it. */ 1996 if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { 1997 ahci_process_read_log(ch, ccb); 1998 /* If it was our REQUEST SENSE command - process it. */ 1999 } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { 2000 ahci_process_request_sense(ch, ccb); 2001 /* If it was NCQ or ATAPI command error, put result on hold. */ 2002 } else if (et == AHCI_ERR_NCQ || 2003 ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && 2004 (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { 2005 ch->hold[slot->slot] = ccb; 2006 ch->numhslots++; 2007 } else 2008 ahci_done(ch, ccb); 2009 /* If we have no other active commands, ... */ 2010 if (ch->rslots == 0) { 2011 /* if there was fatal error - reset port. */ 2012 if (ch->toslots != 0 || ch->fatalerr) { 2013 ahci_reset(ch); 2014 } else { 2015 /* if we have slots in error, we can reinit port. */ 2016 if (ch->eslots != 0) { 2017 ahci_stop(ch); 2018 ahci_clo(ch); 2019 ahci_start(ch, 1); 2020 } 2021 /* if there commands on hold, we can do READ LOG. 
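 * Held commands are NCQ or ATAPI requests whose real completion
 * status is not known yet: ahci_issue_recovery() sends READ LOG EXT
 * (log page 10h) to learn which NCQ tag failed, or REQUEST SENSE to
 * fetch sense data for an ATAPI CHECK CONDITION.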
*/ 2022 if (!ch->recoverycmd && ch->numhslots) 2023 ahci_issue_recovery(ch); 2024 } 2025 /* If all the rest of commands are in timeout - give them chance. */ 2026 } else if ((ch->rslots & ~ch->toslots) == 0 && 2027 et != AHCI_ERR_TIMEOUT) 2028 ahci_rearm_timeout(ch); 2029 /* Unfreeze frozen command. */ 2030 if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) { 2031 union ccb *fccb = ch->frozen; 2032 ch->frozen = NULL; 2033 ahci_begin_transaction(ch, fccb); 2034 xpt_release_simq(ch->sim, TRUE); 2035 } 2036 /* Start PM timer. */ 2037 if (ch->numrslots == 0 && ch->pm_level > 3 && 2038 (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { 2039 callout_schedule(&ch->pm_timer, 2040 (ch->pm_level == 4) ? hz / 1000 : hz / 8); 2041 } 2042 } 2043 2044 static void 2045 ahci_issue_recovery(struct ahci_channel *ch) 2046 { 2047 union ccb *ccb; 2048 struct ccb_ataio *ataio; 2049 struct ccb_scsiio *csio; 2050 int i; 2051 2052 /* Find some held command. */ 2053 for (i = 0; i < ch->numslots; i++) { 2054 if (ch->hold[i]) 2055 break; 2056 } 2057 ccb = xpt_alloc_ccb_nowait(); 2058 if (ccb == NULL) { 2059 device_printf(ch->dev, "Unable to allocate recovery command\n"); 2060 completeall: 2061 /* We can't do anything -- complete held commands. */ 2062 for (i = 0; i < ch->numslots; i++) { 2063 if (ch->hold[i] == NULL) 2064 continue; 2065 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2066 ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; 2067 ahci_done(ch, ch->hold[i]); 2068 ch->hold[i] = NULL; 2069 ch->numhslots--; 2070 } 2071 ahci_reset(ch); 2072 return; 2073 } 2074 ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ 2075 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 2076 /* READ LOG */ 2077 ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; 2078 ccb->ccb_h.func_code = XPT_ATA_IO; 2079 ccb->ccb_h.flags = CAM_DIR_IN; 2080 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ 2081 ataio = &ccb->ataio; 2082 ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); 2083 if (ataio->data_ptr == NULL) { 2084 xpt_free_ccb(ccb); 2085 device_printf(ch->dev, 2086 "Unable to allocate memory for READ LOG command\n"); 2087 goto completeall; 2088 } 2089 ataio->dxfer_len = 512; 2090 bzero(&ataio->cmd, sizeof(ataio->cmd)); 2091 ataio->cmd.flags = CAM_ATAIO_48BIT; 2092 ataio->cmd.command = 0x2F; /* READ LOG EXT */ 2093 ataio->cmd.sector_count = 1; 2094 ataio->cmd.sector_count_exp = 0; 2095 ataio->cmd.lba_low = 0x10; 2096 ataio->cmd.lba_mid = 0; 2097 ataio->cmd.lba_mid_exp = 0; 2098 } else { 2099 /* REQUEST SENSE */ 2100 ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; 2101 ccb->ccb_h.recovery_slot = i; 2102 ccb->ccb_h.func_code = XPT_SCSI_IO; 2103 ccb->ccb_h.flags = CAM_DIR_IN; 2104 ccb->ccb_h.status = 0; 2105 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ 2106 csio = &ccb->csio; 2107 csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; 2108 csio->dxfer_len = ch->hold[i]->csio.sense_len; 2109 csio->cdb_len = 6; 2110 bzero(&csio->cdb_io, sizeof(csio->cdb_io)); 2111 csio->cdb_io.cdb_bytes[0] = 0x03; 2112 csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; 2113 } 2114 /* Freeze SIM while doing recovery. 
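 * Only one recovery command may be outstanding (recoverycmd), and the
 * SIM queue stays frozen until ahci_process_read_log() or
 * ahci_process_request_sense() releases it again.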
*/ 2115 ch->recoverycmd = 1; 2116 xpt_freeze_simq(ch->sim, 1); 2117 ahci_begin_transaction(ch, ccb); 2118 } 2119 2120 static void 2121 ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb) 2122 { 2123 uint8_t *data; 2124 struct ata_res *res; 2125 int i; 2126 2127 ch->recoverycmd = 0; 2128 2129 data = ccb->ataio.data_ptr; 2130 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2131 (data[0] & 0x80) == 0) { 2132 for (i = 0; i < ch->numslots; i++) { 2133 if (!ch->hold[i]) 2134 continue; 2135 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) 2136 continue; 2137 if ((data[0] & 0x1F) == i) { 2138 res = &ch->hold[i]->ataio.res; 2139 res->status = data[2]; 2140 res->error = data[3]; 2141 res->lba_low = data[4]; 2142 res->lba_mid = data[5]; 2143 res->lba_high = data[6]; 2144 res->device = data[7]; 2145 res->lba_low_exp = data[8]; 2146 res->lba_mid_exp = data[9]; 2147 res->lba_high_exp = data[10]; 2148 res->sector_count = data[12]; 2149 res->sector_count_exp = data[13]; 2150 } else { 2151 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2152 ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; 2153 } 2154 ahci_done(ch, ch->hold[i]); 2155 ch->hold[i] = NULL; 2156 ch->numhslots--; 2157 } 2158 } else { 2159 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 2160 device_printf(ch->dev, "Error while READ LOG EXT\n"); 2161 else if ((data[0] & 0x80) == 0) { 2162 device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n"); 2163 } 2164 for (i = 0; i < ch->numslots; i++) { 2165 if (!ch->hold[i]) 2166 continue; 2167 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) 2168 continue; 2169 ahci_done(ch, ch->hold[i]); 2170 ch->hold[i] = NULL; 2171 ch->numhslots--; 2172 } 2173 } 2174 free(ccb->ataio.data_ptr, M_AHCI); 2175 xpt_free_ccb(ccb); 2176 xpt_release_simq(ch->sim, TRUE); 2177 } 2178 2179 static void 2180 ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb) 2181 { 2182 int i; 2183 2184 ch->recoverycmd = 0; 2185 2186 i = ccb->ccb_h.recovery_slot; 2187 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 2188 ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; 2189 } else { 2190 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2191 ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2192 } 2193 ahci_done(ch, ch->hold[i]); 2194 ch->hold[i] = NULL; 2195 ch->numhslots--; 2196 xpt_free_ccb(ccb); 2197 xpt_release_simq(ch->sim, TRUE); 2198 } 2199 2200 static void 2201 ahci_start(struct ahci_channel *ch, int fbs) 2202 { 2203 u_int32_t cmd; 2204 2205 /* Run the channel start callback, if any. */ 2206 if (ch->start) 2207 ch->start(ch); 2208 2209 /* Clear SATA error register */ 2210 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); 2211 /* Clear any interrupts pending on this channel */ 2212 ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); 2213 /* Configure FIS-based switching if supported. */ 2214 if (ch->chcaps & AHCI_P_CMD_FBSCP) { 2215 ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; 2216 ATA_OUTL(ch->r_mem, AHCI_P_FBS, 2217 ch->fbs_enabled ? AHCI_P_FBS_EN : 0); 2218 } 2219 /* Start operations on this channel */ 2220 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2221 cmd &= ~AHCI_P_CMD_PMA; 2222 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | 2223 (ch->pm_present ? AHCI_P_CMD_PMA : 0)); 2224 } 2225 2226 static void 2227 ahci_stop(struct ahci_channel *ch) 2228 { 2229 u_int32_t cmd; 2230 int timeout; 2231 2232 /* Kill all activity on this channel */ 2233 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2234 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); 2235 /* Wait for activity stop. 
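 * PxCMD.CR should clear once PxCMD.ST has been turned off; poll it in
 * 10us steps for up to roughly 500ms before giving up.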
*/ 2236 timeout = 0; 2237 do { 2238 DELAY(10); 2239 if (timeout++ > 50000) { 2240 device_printf(ch->dev, "stopping AHCI engine failed\n"); 2241 break; 2242 } 2243 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR); 2244 ch->eslots = 0; 2245 } 2246 2247 static void 2248 ahci_clo(struct ahci_channel *ch) 2249 { 2250 u_int32_t cmd; 2251 int timeout; 2252 2253 /* Issue Command List Override if supported */ 2254 if (ch->caps & AHCI_CAP_SCLO) { 2255 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2256 cmd |= AHCI_P_CMD_CLO; 2257 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd); 2258 timeout = 0; 2259 do { 2260 DELAY(10); 2261 if (timeout++ > 50000) { 2262 device_printf(ch->dev, "executing CLO failed\n"); 2263 break; 2264 } 2265 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO); 2266 } 2267 } 2268 2269 static void 2270 ahci_stop_fr(struct ahci_channel *ch) 2271 { 2272 u_int32_t cmd; 2273 int timeout; 2274 2275 /* Kill all FIS reception on this channel */ 2276 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2277 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE); 2278 /* Wait for FIS reception stop. */ 2279 timeout = 0; 2280 do { 2281 DELAY(10); 2282 if (timeout++ > 50000) { 2283 device_printf(ch->dev, "stopping AHCI FR engine failed\n"); 2284 break; 2285 } 2286 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR); 2287 } 2288 2289 static void 2290 ahci_start_fr(struct ahci_channel *ch) 2291 { 2292 u_int32_t cmd; 2293 2294 /* Start FIS reception on this channel */ 2295 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2296 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE); 2297 } 2298 2299 static int 2300 ahci_wait_ready(struct ahci_channel *ch, int t, int t0) 2301 { 2302 int timeout = 0; 2303 uint32_t val; 2304 2305 while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) & 2306 (ATA_S_BUSY | ATA_S_DRQ)) { 2307 if (timeout > t) { 2308 if (t != 0) { 2309 device_printf(ch->dev, 2310 "AHCI reset: device not ready after %dms " 2311 "(tfd = %08x)\n", 2312 MAX(t, 0) + t0, val); 2313 } 2314 return (EBUSY); 2315 } 2316 DELAY(1000); 2317 timeout++; 2318 } 2319 if (bootverbose) 2320 device_printf(ch->dev, "AHCI reset: device ready after %dms\n", 2321 timeout + t0); 2322 return (0); 2323 } 2324 2325 static void 2326 ahci_reset_to(void *arg) 2327 { 2328 struct ahci_channel *ch = arg; 2329 2330 if (ch->resetting == 0) 2331 return; 2332 ch->resetting--; 2333 if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0, 2334 (310 - ch->resetting) * 100) == 0) { 2335 ch->resetting = 0; 2336 ahci_start(ch, 1); 2337 xpt_release_simq(ch->sim, TRUE); 2338 return; 2339 } 2340 if (ch->resetting == 0) { 2341 ahci_clo(ch); 2342 ahci_start(ch, 1); 2343 xpt_release_simq(ch->sim, TRUE); 2344 return; 2345 } 2346 callout_schedule(&ch->reset_timer, hz / 10); 2347 } 2348 2349 static void 2350 ahci_reset(struct ahci_channel *ch) 2351 { 2352 struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); 2353 int i; 2354 2355 xpt_freeze_simq(ch->sim, 1); 2356 if (bootverbose) 2357 device_printf(ch->dev, "AHCI reset...\n"); 2358 /* Forget about previous reset. */ 2359 if (ch->resetting) { 2360 ch->resetting = 0; 2361 callout_stop(&ch->reset_timer); 2362 xpt_release_simq(ch->sim, TRUE); 2363 } 2364 /* Requeue freezed command. 
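 * A command parked in ch->frozen never reached the hardware, so it is
 * completed back to CAM with CAM_REQUEUE_REQ (freezing its device
 * queue) to be retried after the reset.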
*/ 2365 if (ch->frozen) { 2366 union ccb *fccb = ch->frozen; 2367 ch->frozen = NULL; 2368 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; 2369 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { 2370 xpt_freeze_devq(fccb->ccb_h.path, 1); 2371 fccb->ccb_h.status |= CAM_DEV_QFRZN; 2372 } 2373 ahci_done(ch, fccb); 2374 } 2375 /* Kill the engine and requeue all running commands. */ 2376 ahci_stop(ch); 2377 for (i = 0; i < ch->numslots; i++) { 2378 /* Do we have a running request on slot? */ 2379 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 2380 continue; 2381 /* XXX; Commands in loading state. */ 2382 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); 2383 } 2384 for (i = 0; i < ch->numslots; i++) { 2385 if (!ch->hold[i]) 2386 continue; 2387 ahci_done(ch, ch->hold[i]); 2388 ch->hold[i] = NULL; 2389 ch->numhslots--; 2390 } 2391 if (ch->toslots != 0) 2392 xpt_release_simq(ch->sim, TRUE); 2393 ch->eslots = 0; 2394 ch->toslots = 0; 2395 ch->wrongccs = 0; 2396 ch->fatalerr = 0; 2397 /* Tell the XPT about the event */ 2398 xpt_async(AC_BUS_RESET, ch->path, NULL); 2399 /* Disable port interrupts */ 2400 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 2401 /* Reset and reconnect PHY, */ 2402 if (!ahci_sata_phy_reset(ch)) { 2403 if (bootverbose) 2404 device_printf(ch->dev, 2405 "AHCI reset: device not found\n"); 2406 ch->devices = 0; 2407 /* Enable wanted port interrupts */ 2408 ATA_OUTL(ch->r_mem, AHCI_P_IE, 2409 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | 2410 AHCI_P_IX_PRC | AHCI_P_IX_PC)); 2411 xpt_release_simq(ch->sim, TRUE); 2412 return; 2413 } 2414 if (bootverbose) 2415 device_printf(ch->dev, "AHCI reset: device found\n"); 2416 /* Wait for clearing busy status. */ 2417 if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) { 2418 if (dumping) 2419 ahci_clo(ch); 2420 else 2421 ch->resetting = 310; 2422 } 2423 ch->devices = 1; 2424 /* Enable wanted port interrupts */ 2425 ATA_OUTL(ch->r_mem, AHCI_P_IE, 2426 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | 2427 AHCI_P_IX_TFE | AHCI_P_IX_HBF | 2428 AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF | 2429 ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC | 2430 AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) | 2431 AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR))); 2432 if (ch->resetting) 2433 callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch); 2434 else { 2435 ahci_start(ch, 1); 2436 xpt_release_simq(ch->sim, TRUE); 2437 } 2438 } 2439 2440 static int 2441 ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag) 2442 { 2443 u_int8_t *fis = &ctp->cfis[0]; 2444 2445 bzero(fis, 20); 2446 fis[0] = 0x27; /* host to device */ 2447 fis[1] = (ccb->ccb_h.target_id & 0x0f); 2448 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 2449 fis[1] |= 0x80; 2450 fis[2] = ATA_PACKET_CMD; 2451 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 2452 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) 2453 fis[3] = ATA_F_DMA; 2454 else { 2455 fis[5] = ccb->csio.dxfer_len; 2456 fis[6] = ccb->csio.dxfer_len >> 8; 2457 } 2458 fis[7] = ATA_D_LBA; 2459 fis[15] = ATA_A_4BIT; 2460 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 
2461 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, 2462 ctp->acmd, ccb->csio.cdb_len); 2463 bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len); 2464 } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { 2465 fis[1] |= 0x80; 2466 fis[2] = ccb->ataio.cmd.command; 2467 fis[3] = ccb->ataio.cmd.features; 2468 fis[4] = ccb->ataio.cmd.lba_low; 2469 fis[5] = ccb->ataio.cmd.lba_mid; 2470 fis[6] = ccb->ataio.cmd.lba_high; 2471 fis[7] = ccb->ataio.cmd.device; 2472 fis[8] = ccb->ataio.cmd.lba_low_exp; 2473 fis[9] = ccb->ataio.cmd.lba_mid_exp; 2474 fis[10] = ccb->ataio.cmd.lba_high_exp; 2475 fis[11] = ccb->ataio.cmd.features_exp; 2476 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { 2477 fis[12] = tag << 3; 2478 } else { 2479 fis[12] = ccb->ataio.cmd.sector_count; 2480 } 2481 fis[13] = ccb->ataio.cmd.sector_count_exp; 2482 fis[15] = ATA_A_4BIT; 2483 } else { 2484 fis[15] = ccb->ataio.cmd.control; 2485 } 2486 if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { 2487 fis[16] = ccb->ataio.aux & 0xff; 2488 fis[17] = (ccb->ataio.aux >> 8) & 0xff; 2489 fis[18] = (ccb->ataio.aux >> 16) & 0xff; 2490 fis[19] = (ccb->ataio.aux >> 24) & 0xff; 2491 } 2492 return (20); 2493 } 2494 2495 static int 2496 ahci_sata_connect(struct ahci_channel *ch) 2497 { 2498 u_int32_t status; 2499 int timeout, found = 0; 2500 2501 /* Wait up to 100ms for "connect well" */ 2502 for (timeout = 0; timeout < 1000 ; timeout++) { 2503 status = ATA_INL(ch->r_mem, AHCI_P_SSTS); 2504 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) 2505 found = 1; 2506 if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && 2507 ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && 2508 ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) 2509 break; 2510 if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { 2511 if (bootverbose) { 2512 device_printf(ch->dev, "SATA offline status=%08x\n", 2513 status); 2514 } 2515 return (0); 2516 } 2517 if (found == 0 && timeout >= 100) 2518 break; 2519 DELAY(100); 2520 } 2521 if (timeout >= 1000 || !found) { 2522 if (bootverbose) { 2523 device_printf(ch->dev, 2524 "SATA connect timeout time=%dus status=%08x\n", 2525 timeout * 100, status); 2526 } 2527 return (0); 2528 } 2529 if (bootverbose) { 2530 device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", 2531 timeout * 100, status); 2532 } 2533 /* Clear SATA error register */ 2534 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); 2535 return (1); 2536 } 2537 2538 static int 2539 ahci_sata_phy_reset(struct ahci_channel *ch) 2540 { 2541 int sata_rev; 2542 uint32_t val, detval; 2543 2544 if (ch->listening) { 2545 val = ATA_INL(ch->r_mem, AHCI_P_CMD); 2546 val |= AHCI_P_CMD_SUD; 2547 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); 2548 ch->listening = 0; 2549 } 2550 sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; 2551 if (sata_rev == 1) 2552 val = ATA_SC_SPD_SPEED_GEN1; 2553 else if (sata_rev == 2) 2554 val = ATA_SC_SPD_SPEED_GEN2; 2555 else if (sata_rev == 3) 2556 val = ATA_SC_SPD_SPEED_GEN3; 2557 else 2558 val = 0; 2559 detval = ahci_ch_detval(ch, ATA_SC_DET_RESET); 2560 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 2561 detval | val | 2562 ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); 2563 DELAY(1000); 2564 detval = ahci_ch_detval(ch, ATA_SC_DET_IDLE); 2565 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 2566 detval | val | ((ch->pm_level > 0) ? 
0 : 2567 (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); 2568 if (!ahci_sata_connect(ch)) { 2569 if (ch->caps & AHCI_CAP_SSS) { 2570 val = ATA_INL(ch->r_mem, AHCI_P_CMD); 2571 val &= ~AHCI_P_CMD_SUD; 2572 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); 2573 ch->listening = 1; 2574 } else if (ch->pm_level > 0) 2575 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); 2576 return (0); 2577 } 2578 return (1); 2579 } 2580 2581 static int 2582 ahci_check_ids(struct ahci_channel *ch, union ccb *ccb) 2583 { 2584 2585 if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) { 2586 ccb->ccb_h.status = CAM_TID_INVALID; 2587 ahci_done(ch, ccb); 2588 return (-1); 2589 } 2590 if (ccb->ccb_h.target_lun != 0) { 2591 ccb->ccb_h.status = CAM_LUN_INVALID; 2592 ahci_done(ch, ccb); 2593 return (-1); 2594 } 2595 return (0); 2596 } 2597 2598 static void 2599 ahciaction(struct cam_sim *sim, union ccb *ccb) 2600 { 2601 struct ahci_channel *ch; 2602 2603 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", 2604 ccb->ccb_h.func_code)); 2605 2606 ch = (struct ahci_channel *)cam_sim_softc(sim); 2607 switch (ccb->ccb_h.func_code) { 2608 /* Common cases first */ 2609 case XPT_ATA_IO: /* Execute the requested I/O operation */ 2610 case XPT_SCSI_IO: 2611 if (ahci_check_ids(ch, ccb)) 2612 return; 2613 if (ch->devices == 0 || 2614 (ch->pm_present == 0 && 2615 ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { 2616 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 2617 break; 2618 } 2619 ccb->ccb_h.recovery_type = RECOVERY_NONE; 2620 /* Check for command collision. */ 2621 if (ahci_check_collision(ch, ccb)) { 2622 /* Freeze command. */ 2623 ch->frozen = ccb; 2624 /* We have only one frozen slot, so freeze simq also. */ 2625 xpt_freeze_simq(ch->sim, 1); 2626 return; 2627 } 2628 ahci_begin_transaction(ch, ccb); 2629 return; 2630 case XPT_ABORT: /* Abort the specified CCB */ 2631 /* XXX Implement */ 2632 ccb->ccb_h.status = CAM_REQ_INVALID; 2633 break; 2634 case XPT_SET_TRAN_SETTINGS: 2635 { 2636 struct ccb_trans_settings *cts = &ccb->cts; 2637 struct ahci_device *d; 2638 2639 if (ahci_check_ids(ch, ccb)) 2640 return; 2641 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 2642 d = &ch->curr[ccb->ccb_h.target_id]; 2643 else 2644 d = &ch->user[ccb->ccb_h.target_id]; 2645 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) 2646 d->revision = cts->xport_specific.sata.revision; 2647 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) 2648 d->mode = cts->xport_specific.sata.mode; 2649 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) 2650 d->bytecount = min(8192, cts->xport_specific.sata.bytecount); 2651 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) 2652 d->tags = min(ch->numslots, cts->xport_specific.sata.tags); 2653 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) 2654 ch->pm_present = cts->xport_specific.sata.pm_present; 2655 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) 2656 d->atapi = cts->xport_specific.sata.atapi; 2657 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) 2658 d->caps = cts->xport_specific.sata.caps; 2659 ccb->ccb_h.status = CAM_REQ_CMP; 2660 break; 2661 } 2662 case XPT_GET_TRAN_SETTINGS: 2663 /* Get default/user set transfer settings for the target */ 2664 { 2665 struct ccb_trans_settings *cts = &ccb->cts; 2666 struct ahci_device *d; 2667 uint32_t status; 2668 2669 if (ahci_check_ids(ch, ccb)) 2670 return; 2671 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 2672 d = &ch->curr[ccb->ccb_h.target_id]; 2673 else 2674 d = 
&ch->user[ccb->ccb_h.target_id]; 2675 cts->protocol = PROTO_UNSPECIFIED; 2676 cts->protocol_version = PROTO_VERSION_UNSPECIFIED; 2677 cts->transport = XPORT_SATA; 2678 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 2679 cts->proto_specific.valid = 0; 2680 cts->xport_specific.sata.valid = 0; 2681 if (cts->type == CTS_TYPE_CURRENT_SETTINGS && 2682 (ccb->ccb_h.target_id == 15 || 2683 (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { 2684 status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; 2685 if (status & 0x0f0) { 2686 cts->xport_specific.sata.revision = 2687 (status & 0x0f0) >> 4; 2688 cts->xport_specific.sata.valid |= 2689 CTS_SATA_VALID_REVISION; 2690 } 2691 cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; 2692 if (ch->pm_level) { 2693 if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) 2694 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; 2695 if (ch->caps2 & AHCI_CAP2_APST) 2696 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; 2697 } 2698 if ((ch->caps & AHCI_CAP_SNCQ) && 2699 (ch->quirks & AHCI_Q_NOAA) == 0) 2700 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; 2701 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; 2702 cts->xport_specific.sata.caps &= 2703 ch->user[ccb->ccb_h.target_id].caps; 2704 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 2705 } else { 2706 cts->xport_specific.sata.revision = d->revision; 2707 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; 2708 cts->xport_specific.sata.caps = d->caps; 2709 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 2710 } 2711 cts->xport_specific.sata.mode = d->mode; 2712 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; 2713 cts->xport_specific.sata.bytecount = d->bytecount; 2714 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; 2715 cts->xport_specific.sata.pm_present = ch->pm_present; 2716 cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; 2717 cts->xport_specific.sata.tags = d->tags; 2718 cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; 2719 cts->xport_specific.sata.atapi = d->atapi; 2720 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; 2721 ccb->ccb_h.status = CAM_REQ_CMP; 2722 break; 2723 } 2724 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 2725 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 2726 ahci_reset(ch); 2727 ccb->ccb_h.status = CAM_REQ_CMP; 2728 break; 2729 case XPT_TERM_IO: /* Terminate the I/O process */ 2730 /* XXX Implement */ 2731 ccb->ccb_h.status = CAM_REQ_INVALID; 2732 break; 2733 case XPT_PATH_INQ: /* Path routing inquiry */ 2734 { 2735 struct ccb_pathinq *cpi = &ccb->cpi; 2736 2737 cpi->version_num = 1; /* XXX??? 
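 * The remaining path-inquiry fields advertise what this channel can
 * do: NCQ-capable ports report PI_TAG_ABLE, port-multiplier capable
 * ports expose targets 0-15, and maxio is clamped to 128 sectors
 * (64k) on controllers with the AHCI_Q_MAXIO_64K quirk.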
*/ 2738 cpi->hba_inquiry = PI_SDTR_ABLE; 2739 if (ch->caps & AHCI_CAP_SNCQ) 2740 cpi->hba_inquiry |= PI_TAG_ABLE; 2741 if (ch->caps & AHCI_CAP_SPM) 2742 cpi->hba_inquiry |= PI_SATAPM; 2743 cpi->target_sprt = 0; 2744 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; 2745 if ((ch->quirks & AHCI_Q_NOAUX) == 0) 2746 cpi->hba_misc |= PIM_ATA_EXT; 2747 cpi->hba_eng_cnt = 0; 2748 if (ch->caps & AHCI_CAP_SPM) 2749 cpi->max_target = 15; 2750 else 2751 cpi->max_target = 0; 2752 cpi->max_lun = 0; 2753 cpi->initiator_id = 0; 2754 cpi->bus_id = cam_sim_bus(sim); 2755 cpi->base_transfer_speed = 150000; 2756 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2757 strlcpy(cpi->hba_vid, "AHCI", HBA_IDLEN); 2758 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2759 cpi->unit_number = cam_sim_unit(sim); 2760 cpi->transport = XPORT_SATA; 2761 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 2762 cpi->protocol = PROTO_ATA; 2763 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 2764 cpi->maxio = MAXPHYS; 2765 /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ 2766 if (ch->quirks & AHCI_Q_MAXIO_64K) 2767 cpi->maxio = min(cpi->maxio, 128 * 512); 2768 cpi->hba_vendor = ch->vendorid; 2769 cpi->hba_device = ch->deviceid; 2770 cpi->hba_subvendor = ch->subvendorid; 2771 cpi->hba_subdevice = ch->subdeviceid; 2772 cpi->ccb_h.status = CAM_REQ_CMP; 2773 break; 2774 } 2775 default: 2776 ccb->ccb_h.status = CAM_REQ_INVALID; 2777 break; 2778 } 2779 ahci_done(ch, ccb); 2780 } 2781 2782 static void 2783 ahcipoll(struct cam_sim *sim) 2784 { 2785 struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); 2786 uint32_t istatus; 2787 2788 /* Read interrupt statuses and process if any. */ 2789 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 2790 if (istatus != 0) 2791 ahci_ch_intr_main(ch, istatus); 2792 if (ch->resetting != 0 && 2793 (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { 2794 ch->resetpolldiv = 1000; 2795 ahci_reset_to(ch); 2796 } 2797 } 2798 2799 devclass_t ahci_devclass; 2800 2801 MODULE_VERSION(ahci, 1); 2802 MODULE_DEPEND(ahci, cam, 1, 1, 1); 2803
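/*
 * Module plumbing: ahci_devclass is shared with the bus-specific
 * front-ends (e.g. the PCI attachment), and the MODULE_DEPEND on
 * cam(4) records that this driver needs the CAM transport layer
 * backing every cam_sim registered above.
 */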