1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2009-2012 Alexander Motin <mav@FreeBSD.org> 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer, 12 * without modification, immediately at the beginning of the file. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/module.h> 34 #include <sys/systm.h> 35 #include <sys/kernel.h> 36 #include <sys/bus.h> 37 #include <sys/conf.h> 38 #include <sys/endian.h> 39 #include <sys/malloc.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/sysctl.h> 43 #include <machine/stdarg.h> 44 #include <machine/resource.h> 45 #include <machine/bus.h> 46 #include <sys/rman.h> 47 #include "ahci.h" 48 49 #include <cam/cam.h> 50 #include <cam/cam_ccb.h> 51 #include <cam/cam_sim.h> 52 #include <cam/cam_xpt_sim.h> 53 #include <cam/cam_debug.h> 54 55 /* local prototypes */ 56 static void ahci_intr(void *data); 57 static void ahci_intr_one(void *data); 58 static void ahci_intr_one_edge(void *data); 59 static int ahci_ch_init(device_t dev); 60 static int ahci_ch_deinit(device_t dev); 61 static int ahci_ch_suspend(device_t dev); 62 static int ahci_ch_resume(device_t dev); 63 static void ahci_ch_pm(void *arg); 64 static void ahci_ch_intr(void *arg); 65 static void ahci_ch_intr_direct(void *arg); 66 static void ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus); 67 static void ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb); 68 static void ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error); 69 static void ahci_execute_transaction(struct ahci_slot *slot); 70 static void ahci_timeout(struct ahci_slot *slot); 71 static void ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et); 72 static int ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag); 73 static void ahci_dmainit(device_t dev); 74 static void ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error); 75 static void ahci_dmafini(device_t dev); 76 static void ahci_slotsalloc(device_t dev); 77 static void ahci_slotsfree(device_t dev); 78 static void ahci_reset(struct ahci_channel *ch); 79 static void ahci_start(struct ahci_channel *ch, int fbs); 80 static void ahci_stop(struct ahci_channel *ch); 81 static void ahci_clo(struct ahci_channel *ch); 82 static void ahci_start_fr(struct ahci_channel *ch); 83 static void ahci_stop_fr(struct ahci_channel *ch); 84 static int ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr); 85 static uint32_t ahci_ch_detval(struct ahci_channel *ch, uint32_t val); 86 87 static int ahci_sata_connect(struct ahci_channel *ch); 88 static int ahci_sata_phy_reset(struct ahci_channel *ch); 89 static int ahci_wait_ready(struct ahci_channel *ch, int t, int t0); 90 91 static void ahci_issue_recovery(struct ahci_channel *ch); 92 static void ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb); 93 static void ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb); 94 95 static void ahciaction(struct cam_sim *sim, union ccb *ccb); 96 static void ahcipoll(struct cam_sim *sim); 97 98 static MALLOC_DEFINE(M_AHCI, "AHCI driver", "AHCI driver data buffers"); 99 100 #define recovery_type spriv_field0 101 #define RECOVERY_NONE 0 102 #define RECOVERY_READ_LOG 1 103 #define RECOVERY_REQUEST_SENSE 2 104 #define recovery_slot spriv_field1 105 106 static uint32_t 107 ahci_ch_detval(struct ahci_channel *ch, uint32_t val) 108 { 109 110 return ch->disablephy ? ATA_SC_DET_DISABLE : val; 111 } 112 113 int 114 ahci_ctlr_setup(device_t dev) 115 { 116 struct ahci_controller *ctlr = device_get_softc(dev); 117 /* Clear interrupts */ 118 ATA_OUTL(ctlr->r_mem, AHCI_IS, ATA_INL(ctlr->r_mem, AHCI_IS)); 119 /* Configure CCC */ 120 if (ctlr->ccc) { 121 ATA_OUTL(ctlr->r_mem, AHCI_CCCP, ATA_INL(ctlr->r_mem, AHCI_PI)); 122 ATA_OUTL(ctlr->r_mem, AHCI_CCCC, 123 (ctlr->ccc << AHCI_CCCC_TV_SHIFT) | 124 (4 << AHCI_CCCC_CC_SHIFT) | 125 AHCI_CCCC_EN); 126 ctlr->cccv = (ATA_INL(ctlr->r_mem, AHCI_CCCC) & 127 AHCI_CCCC_INT_MASK) >> AHCI_CCCC_INT_SHIFT; 128 if (bootverbose) { 129 device_printf(dev, 130 "CCC with %dms/4cmd enabled on vector %d\n", 131 ctlr->ccc, ctlr->cccv); 132 } 133 } 134 /* Enable AHCI interrupts */ 135 ATA_OUTL(ctlr->r_mem, AHCI_GHC, 136 ATA_INL(ctlr->r_mem, AHCI_GHC) | AHCI_GHC_IE); 137 return (0); 138 } 139 140 int 141 ahci_ctlr_reset(device_t dev) 142 { 143 struct ahci_controller *ctlr = device_get_softc(dev); 144 int timeout; 145 146 /* Enable AHCI mode */ 147 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); 148 /* Reset AHCI controller */ 149 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE|AHCI_GHC_HR); 150 for (timeout = 1000; timeout > 0; timeout--) { 151 DELAY(1000); 152 if ((ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_HR) == 0) 153 break; 154 } 155 if (timeout == 0) { 156 device_printf(dev, "AHCI controller reset failure\n"); 157 return (ENXIO); 158 } 159 /* Reenable AHCI mode */ 160 ATA_OUTL(ctlr->r_mem, AHCI_GHC, AHCI_GHC_AE); 161 162 if (ctlr->quirks & AHCI_Q_RESTORE_CAP) { 163 /* 164 * Restore capability field. 165 * This is write to a read-only register to restore its state. 166 * On fully standard-compliant hardware this is not needed and 167 * this operation shall not take place. See ahci_pci.c for 168 * platforms using this quirk. 169 */ 170 ATA_OUTL(ctlr->r_mem, AHCI_CAP, ctlr->caps); 171 } 172 173 return (0); 174 } 175 176 177 int 178 ahci_attach(device_t dev) 179 { 180 struct ahci_controller *ctlr = device_get_softc(dev); 181 int error, i, speed, unit; 182 uint32_t u, version; 183 device_t child; 184 185 ctlr->dev = dev; 186 ctlr->ccc = 0; 187 resource_int_value(device_get_name(dev), 188 device_get_unit(dev), "ccc", &ctlr->ccc); 189 190 /* Setup our own memory management for channels. */ 191 ctlr->sc_iomem.rm_start = rman_get_start(ctlr->r_mem); 192 ctlr->sc_iomem.rm_end = rman_get_end(ctlr->r_mem); 193 ctlr->sc_iomem.rm_type = RMAN_ARRAY; 194 ctlr->sc_iomem.rm_descr = "I/O memory addresses"; 195 if ((error = rman_init(&ctlr->sc_iomem)) != 0) { 196 ahci_free_mem(dev); 197 return (error); 198 } 199 if ((error = rman_manage_region(&ctlr->sc_iomem, 200 rman_get_start(ctlr->r_mem), rman_get_end(ctlr->r_mem))) != 0) { 201 ahci_free_mem(dev); 202 rman_fini(&ctlr->sc_iomem); 203 return (error); 204 } 205 /* Get the HW capabilities */ 206 version = ATA_INL(ctlr->r_mem, AHCI_VS); 207 ctlr->caps = ATA_INL(ctlr->r_mem, AHCI_CAP); 208 if (version >= 0x00010200) 209 ctlr->caps2 = ATA_INL(ctlr->r_mem, AHCI_CAP2); 210 if (ctlr->caps & AHCI_CAP_EMS) 211 ctlr->capsem = ATA_INL(ctlr->r_mem, AHCI_EM_CTL); 212 213 if (ctlr->quirks & AHCI_Q_FORCE_PI) { 214 /* 215 * Enable ports. 216 * The spec says that BIOS sets up bits corresponding to 217 * available ports. On platforms where this information 218 * is missing, the driver can define available ports on its own. 219 */ 220 int nports = (ctlr->caps & AHCI_CAP_NPMASK) + 1; 221 int nmask = (1 << nports) - 1; 222 223 ATA_OUTL(ctlr->r_mem, AHCI_PI, nmask); 224 device_printf(dev, "Forcing PI to %d ports (mask = %x)\n", 225 nports, nmask); 226 } 227 228 ctlr->ichannels = ATA_INL(ctlr->r_mem, AHCI_PI); 229 230 /* Identify and set separate quirks for HBA and RAID f/w Marvells. */ 231 if ((ctlr->quirks & AHCI_Q_ALTSIG) && 232 (ctlr->caps & AHCI_CAP_SPM) == 0) 233 ctlr->quirks |= AHCI_Q_NOBSYRES; 234 235 if (ctlr->quirks & AHCI_Q_1CH) { 236 ctlr->caps &= ~AHCI_CAP_NPMASK; 237 ctlr->ichannels &= 0x01; 238 } 239 if (ctlr->quirks & AHCI_Q_2CH) { 240 ctlr->caps &= ~AHCI_CAP_NPMASK; 241 ctlr->caps |= 1; 242 ctlr->ichannels &= 0x03; 243 } 244 if (ctlr->quirks & AHCI_Q_4CH) { 245 ctlr->caps &= ~AHCI_CAP_NPMASK; 246 ctlr->caps |= 3; 247 ctlr->ichannels &= 0x0f; 248 } 249 ctlr->channels = MAX(flsl(ctlr->ichannels), 250 (ctlr->caps & AHCI_CAP_NPMASK) + 1); 251 if (ctlr->quirks & AHCI_Q_NOPMP) 252 ctlr->caps &= ~AHCI_CAP_SPM; 253 if (ctlr->quirks & AHCI_Q_NONCQ) 254 ctlr->caps &= ~AHCI_CAP_SNCQ; 255 if ((ctlr->caps & AHCI_CAP_CCCS) == 0) 256 ctlr->ccc = 0; 257 ctlr->emloc = ATA_INL(ctlr->r_mem, AHCI_EM_LOC); 258 259 /* Create controller-wide DMA tag. */ 260 if (bus_dma_tag_create(bus_get_dma_tag(dev), 1, 0, 261 (ctlr->caps & AHCI_CAP_64BIT) ? BUS_SPACE_MAXADDR : 262 BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR, NULL, NULL, 263 BUS_SPACE_MAXSIZE, BUS_SPACE_UNRESTRICTED, BUS_SPACE_MAXSIZE, 264 ctlr->dma_coherent ? BUS_DMA_COHERENT : 0, NULL, NULL, 265 &ctlr->dma_tag)) { 266 ahci_free_mem(dev); 267 rman_fini(&ctlr->sc_iomem); 268 return (ENXIO); 269 } 270 271 ahci_ctlr_setup(dev); 272 273 /* Setup interrupts. */ 274 if ((error = ahci_setup_interrupt(dev)) != 0) { 275 bus_dma_tag_destroy(ctlr->dma_tag); 276 ahci_free_mem(dev); 277 rman_fini(&ctlr->sc_iomem); 278 return (error); 279 } 280 281 i = 0; 282 for (u = ctlr->ichannels; u != 0; u >>= 1) 283 i += (u & 1); 284 ctlr->direct = (ctlr->msi && (ctlr->numirqs > 1 || i <= 3)); 285 resource_int_value(device_get_name(dev), device_get_unit(dev), 286 "direct", &ctlr->direct); 287 /* Announce HW capabilities. */ 288 speed = (ctlr->caps & AHCI_CAP_ISS) >> AHCI_CAP_ISS_SHIFT; 289 device_printf(dev, 290 "AHCI v%x.%02x with %d %sGbps ports, Port Multiplier %s%s\n", 291 ((version >> 20) & 0xf0) + ((version >> 16) & 0x0f), 292 ((version >> 4) & 0xf0) + (version & 0x0f), 293 (ctlr->caps & AHCI_CAP_NPMASK) + 1, 294 ((speed == 1) ? "1.5":((speed == 2) ? "3": 295 ((speed == 3) ? "6":"?"))), 296 (ctlr->caps & AHCI_CAP_SPM) ? 297 "supported" : "not supported", 298 (ctlr->caps & AHCI_CAP_FBSS) ? 299 " with FBS" : ""); 300 if (ctlr->quirks != 0) { 301 device_printf(dev, "quirks=0x%b\n", ctlr->quirks, 302 AHCI_Q_BIT_STRING); 303 } 304 if (bootverbose) { 305 device_printf(dev, "Caps:%s%s%s%s%s%s%s%s %sGbps", 306 (ctlr->caps & AHCI_CAP_64BIT) ? " 64bit":"", 307 (ctlr->caps & AHCI_CAP_SNCQ) ? " NCQ":"", 308 (ctlr->caps & AHCI_CAP_SSNTF) ? " SNTF":"", 309 (ctlr->caps & AHCI_CAP_SMPS) ? " MPS":"", 310 (ctlr->caps & AHCI_CAP_SSS) ? " SS":"", 311 (ctlr->caps & AHCI_CAP_SALP) ? " ALP":"", 312 (ctlr->caps & AHCI_CAP_SAL) ? " AL":"", 313 (ctlr->caps & AHCI_CAP_SCLO) ? " CLO":"", 314 ((speed == 1) ? "1.5":((speed == 2) ? "3": 315 ((speed == 3) ? "6":"?")))); 316 printf("%s%s%s%s%s%s %dcmd%s%s%s %dports\n", 317 (ctlr->caps & AHCI_CAP_SAM) ? " AM":"", 318 (ctlr->caps & AHCI_CAP_SPM) ? " PM":"", 319 (ctlr->caps & AHCI_CAP_FBSS) ? " FBS":"", 320 (ctlr->caps & AHCI_CAP_PMD) ? " PMD":"", 321 (ctlr->caps & AHCI_CAP_SSC) ? " SSC":"", 322 (ctlr->caps & AHCI_CAP_PSC) ? " PSC":"", 323 ((ctlr->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1, 324 (ctlr->caps & AHCI_CAP_CCCS) ? " CCC":"", 325 (ctlr->caps & AHCI_CAP_EMS) ? " EM":"", 326 (ctlr->caps & AHCI_CAP_SXS) ? " eSATA":"", 327 (ctlr->caps & AHCI_CAP_NPMASK) + 1); 328 } 329 if (bootverbose && version >= 0x00010200) { 330 device_printf(dev, "Caps2:%s%s%s%s%s%s\n", 331 (ctlr->caps2 & AHCI_CAP2_DESO) ? " DESO":"", 332 (ctlr->caps2 & AHCI_CAP2_SADM) ? " SADM":"", 333 (ctlr->caps2 & AHCI_CAP2_SDS) ? " SDS":"", 334 (ctlr->caps2 & AHCI_CAP2_APST) ? " APST":"", 335 (ctlr->caps2 & AHCI_CAP2_NVMP) ? " NVMP":"", 336 (ctlr->caps2 & AHCI_CAP2_BOH) ? " BOH":""); 337 } 338 /* Attach all channels on this controller */ 339 for (unit = 0; unit < ctlr->channels; unit++) { 340 child = device_add_child(dev, "ahcich", -1); 341 if (child == NULL) { 342 device_printf(dev, "failed to add channel device\n"); 343 continue; 344 } 345 device_set_ivars(child, (void *)(intptr_t)unit); 346 if ((ctlr->ichannels & (1 << unit)) == 0) 347 device_disable(child); 348 } 349 if (ctlr->caps & AHCI_CAP_EMS) { 350 child = device_add_child(dev, "ahciem", -1); 351 if (child == NULL) 352 device_printf(dev, "failed to add enclosure device\n"); 353 else 354 device_set_ivars(child, (void *)(intptr_t)-1); 355 } 356 bus_generic_attach(dev); 357 return (0); 358 } 359 360 int 361 ahci_detach(device_t dev) 362 { 363 struct ahci_controller *ctlr = device_get_softc(dev); 364 int i; 365 366 /* Detach & delete all children */ 367 device_delete_children(dev); 368 369 /* Free interrupts. */ 370 for (i = 0; i < ctlr->numirqs; i++) { 371 if (ctlr->irqs[i].r_irq) { 372 bus_teardown_intr(dev, ctlr->irqs[i].r_irq, 373 ctlr->irqs[i].handle); 374 bus_release_resource(dev, SYS_RES_IRQ, 375 ctlr->irqs[i].r_irq_rid, ctlr->irqs[i].r_irq); 376 } 377 } 378 bus_dma_tag_destroy(ctlr->dma_tag); 379 /* Free memory. */ 380 rman_fini(&ctlr->sc_iomem); 381 ahci_free_mem(dev); 382 return (0); 383 } 384 385 void 386 ahci_free_mem(device_t dev) 387 { 388 struct ahci_controller *ctlr = device_get_softc(dev); 389 390 /* Release memory resources */ 391 if (ctlr->r_mem) 392 bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid, ctlr->r_mem); 393 if (ctlr->r_msix_table) 394 bus_release_resource(dev, SYS_RES_MEMORY, 395 ctlr->r_msix_tab_rid, ctlr->r_msix_table); 396 if (ctlr->r_msix_pba) 397 bus_release_resource(dev, SYS_RES_MEMORY, 398 ctlr->r_msix_pba_rid, ctlr->r_msix_pba); 399 400 ctlr->r_msix_pba = ctlr->r_mem = ctlr->r_msix_table = NULL; 401 } 402 403 int 404 ahci_setup_interrupt(device_t dev) 405 { 406 struct ahci_controller *ctlr = device_get_softc(dev); 407 int i; 408 409 /* Check for single MSI vector fallback. */ 410 if (ctlr->numirqs > 1 && 411 (ATA_INL(ctlr->r_mem, AHCI_GHC) & AHCI_GHC_MRSM) != 0) { 412 device_printf(dev, "Falling back to one MSI\n"); 413 ctlr->numirqs = 1; 414 } 415 416 /* Ensure we don't overrun irqs. */ 417 if (ctlr->numirqs > AHCI_MAX_IRQS) { 418 device_printf(dev, "Too many irqs %d > %d (clamping)\n", 419 ctlr->numirqs, AHCI_MAX_IRQS); 420 ctlr->numirqs = AHCI_MAX_IRQS; 421 } 422 423 /* Allocate all IRQs. */ 424 for (i = 0; i < ctlr->numirqs; i++) { 425 ctlr->irqs[i].ctlr = ctlr; 426 ctlr->irqs[i].r_irq_rid = i + (ctlr->msi ? 1 : 0); 427 if (ctlr->channels == 1 && !ctlr->ccc && ctlr->msi) 428 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; 429 else if (ctlr->numirqs == 1 || i >= ctlr->channels || 430 (ctlr->ccc && i == ctlr->cccv)) 431 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ALL; 432 else if (ctlr->channels > ctlr->numirqs && 433 i == ctlr->numirqs - 1) 434 ctlr->irqs[i].mode = AHCI_IRQ_MODE_AFTER; 435 else 436 ctlr->irqs[i].mode = AHCI_IRQ_MODE_ONE; 437 if (!(ctlr->irqs[i].r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 438 &ctlr->irqs[i].r_irq_rid, RF_SHAREABLE | RF_ACTIVE))) { 439 device_printf(dev, "unable to map interrupt\n"); 440 return (ENXIO); 441 } 442 if ((bus_setup_intr(dev, ctlr->irqs[i].r_irq, ATA_INTR_FLAGS, NULL, 443 (ctlr->irqs[i].mode != AHCI_IRQ_MODE_ONE) ? ahci_intr : 444 ((ctlr->quirks & AHCI_Q_EDGEIS) ? ahci_intr_one_edge : 445 ahci_intr_one), 446 &ctlr->irqs[i], &ctlr->irqs[i].handle))) { 447 /* SOS XXX release r_irq */ 448 device_printf(dev, "unable to setup interrupt\n"); 449 return (ENXIO); 450 } 451 if (ctlr->numirqs > 1) { 452 bus_describe_intr(dev, ctlr->irqs[i].r_irq, 453 ctlr->irqs[i].handle, 454 ctlr->irqs[i].mode == AHCI_IRQ_MODE_ONE ? 455 "ch%d" : "%d", i); 456 } 457 } 458 return (0); 459 } 460 461 /* 462 * Common case interrupt handler. 463 */ 464 static void 465 ahci_intr(void *data) 466 { 467 struct ahci_controller_irq *irq = data; 468 struct ahci_controller *ctlr = irq->ctlr; 469 u_int32_t is, ise = 0; 470 void *arg; 471 int unit; 472 473 if (irq->mode == AHCI_IRQ_MODE_ALL) { 474 unit = 0; 475 if (ctlr->ccc) 476 is = ctlr->ichannels; 477 else 478 is = ATA_INL(ctlr->r_mem, AHCI_IS); 479 } else { /* AHCI_IRQ_MODE_AFTER */ 480 unit = irq->r_irq_rid - 1; 481 is = ATA_INL(ctlr->r_mem, AHCI_IS); 482 is &= (0xffffffff << unit); 483 } 484 /* CCC interrupt is edge triggered. */ 485 if (ctlr->ccc) 486 ise = 1 << ctlr->cccv; 487 /* Some controllers have edge triggered IS. */ 488 if (ctlr->quirks & AHCI_Q_EDGEIS) 489 ise |= is; 490 if (ise != 0) 491 ATA_OUTL(ctlr->r_mem, AHCI_IS, ise); 492 for (; unit < ctlr->channels; unit++) { 493 if ((is & (1 << unit)) != 0 && 494 (arg = ctlr->interrupt[unit].argument)) { 495 ctlr->interrupt[unit].function(arg); 496 } 497 } 498 /* AHCI declares level triggered IS. */ 499 if (!(ctlr->quirks & AHCI_Q_EDGEIS)) 500 ATA_OUTL(ctlr->r_mem, AHCI_IS, is); 501 ATA_RBL(ctlr->r_mem, AHCI_IS); 502 } 503 504 /* 505 * Simplified interrupt handler for multivector MSI mode. 506 */ 507 static void 508 ahci_intr_one(void *data) 509 { 510 struct ahci_controller_irq *irq = data; 511 struct ahci_controller *ctlr = irq->ctlr; 512 void *arg; 513 int unit; 514 515 unit = irq->r_irq_rid - 1; 516 if ((arg = ctlr->interrupt[unit].argument)) 517 ctlr->interrupt[unit].function(arg); 518 /* AHCI declares level triggered IS. */ 519 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); 520 ATA_RBL(ctlr->r_mem, AHCI_IS); 521 } 522 523 static void 524 ahci_intr_one_edge(void *data) 525 { 526 struct ahci_controller_irq *irq = data; 527 struct ahci_controller *ctlr = irq->ctlr; 528 void *arg; 529 int unit; 530 531 unit = irq->r_irq_rid - 1; 532 /* Some controllers have edge triggered IS. */ 533 ATA_OUTL(ctlr->r_mem, AHCI_IS, 1 << unit); 534 if ((arg = ctlr->interrupt[unit].argument)) 535 ctlr->interrupt[unit].function(arg); 536 ATA_RBL(ctlr->r_mem, AHCI_IS); 537 } 538 539 struct resource * 540 ahci_alloc_resource(device_t dev, device_t child, int type, int *rid, 541 rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 542 { 543 struct ahci_controller *ctlr = device_get_softc(dev); 544 struct resource *res; 545 rman_res_t st; 546 int offset, size, unit; 547 548 unit = (intptr_t)device_get_ivars(child); 549 res = NULL; 550 switch (type) { 551 case SYS_RES_MEMORY: 552 if (unit >= 0) { 553 offset = AHCI_OFFSET + (unit << 7); 554 size = 128; 555 } else if (*rid == 0) { 556 offset = AHCI_EM_CTL; 557 size = 4; 558 } else { 559 offset = (ctlr->emloc & 0xffff0000) >> 14; 560 size = (ctlr->emloc & 0x0000ffff) << 2; 561 if (*rid != 1) { 562 if (*rid == 2 && (ctlr->capsem & 563 (AHCI_EM_XMT | AHCI_EM_SMB)) == 0) 564 offset += size; 565 else 566 break; 567 } 568 } 569 st = rman_get_start(ctlr->r_mem); 570 res = rman_reserve_resource(&ctlr->sc_iomem, st + offset, 571 st + offset + size - 1, size, RF_ACTIVE, child); 572 if (res) { 573 bus_space_handle_t bsh; 574 bus_space_tag_t bst; 575 bsh = rman_get_bushandle(ctlr->r_mem); 576 bst = rman_get_bustag(ctlr->r_mem); 577 bus_space_subregion(bst, bsh, offset, 128, &bsh); 578 rman_set_bushandle(res, bsh); 579 rman_set_bustag(res, bst); 580 } 581 break; 582 case SYS_RES_IRQ: 583 if (*rid == ATA_IRQ_RID) 584 res = ctlr->irqs[0].r_irq; 585 break; 586 } 587 return (res); 588 } 589 590 int 591 ahci_release_resource(device_t dev, device_t child, int type, int rid, 592 struct resource *r) 593 { 594 595 switch (type) { 596 case SYS_RES_MEMORY: 597 rman_release_resource(r); 598 return (0); 599 case SYS_RES_IRQ: 600 if (rid != ATA_IRQ_RID) 601 return (ENOENT); 602 return (0); 603 } 604 return (EINVAL); 605 } 606 607 int 608 ahci_setup_intr(device_t dev, device_t child, struct resource *irq, 609 int flags, driver_filter_t *filter, driver_intr_t *function, 610 void *argument, void **cookiep) 611 { 612 struct ahci_controller *ctlr = device_get_softc(dev); 613 int unit = (intptr_t)device_get_ivars(child); 614 615 if (filter != NULL) { 616 printf("ahci.c: we cannot use a filter here\n"); 617 return (EINVAL); 618 } 619 ctlr->interrupt[unit].function = function; 620 ctlr->interrupt[unit].argument = argument; 621 return (0); 622 } 623 624 int 625 ahci_teardown_intr(device_t dev, device_t child, struct resource *irq, 626 void *cookie) 627 { 628 struct ahci_controller *ctlr = device_get_softc(dev); 629 int unit = (intptr_t)device_get_ivars(child); 630 631 ctlr->interrupt[unit].function = NULL; 632 ctlr->interrupt[unit].argument = NULL; 633 return (0); 634 } 635 636 int 637 ahci_print_child(device_t dev, device_t child) 638 { 639 int retval, channel; 640 641 retval = bus_print_child_header(dev, child); 642 channel = (int)(intptr_t)device_get_ivars(child); 643 if (channel >= 0) 644 retval += printf(" at channel %d", channel); 645 retval += bus_print_child_footer(dev, child); 646 return (retval); 647 } 648 649 int 650 ahci_child_location_str(device_t dev, device_t child, char *buf, 651 size_t buflen) 652 { 653 int channel; 654 655 channel = (int)(intptr_t)device_get_ivars(child); 656 if (channel >= 0) 657 snprintf(buf, buflen, "channel=%d", channel); 658 return (0); 659 } 660 661 bus_dma_tag_t 662 ahci_get_dma_tag(device_t dev, device_t child) 663 { 664 struct ahci_controller *ctlr = device_get_softc(dev); 665 666 return (ctlr->dma_tag); 667 } 668 669 static int 670 ahci_ch_probe(device_t dev) 671 { 672 673 device_set_desc_copy(dev, "AHCI channel"); 674 return (BUS_PROBE_DEFAULT); 675 } 676 677 static int 678 ahci_ch_disablephy_proc(SYSCTL_HANDLER_ARGS) 679 { 680 struct ahci_channel *ch; 681 int error, value; 682 683 ch = arg1; 684 value = ch->disablephy; 685 error = sysctl_handle_int(oidp, &value, 0, req); 686 if (error != 0 || req->newptr == NULL || (value != 0 && value != 1)) 687 return (error); 688 689 mtx_lock(&ch->mtx); 690 ch->disablephy = value; 691 if (value) { 692 ahci_ch_deinit(ch->dev); 693 } else { 694 ahci_ch_init(ch->dev); 695 ahci_phy_check_events(ch, ATA_SE_PHY_CHANGED | ATA_SE_EXCHANGED); 696 } 697 mtx_unlock(&ch->mtx); 698 699 return (0); 700 } 701 702 static int 703 ahci_ch_attach(device_t dev) 704 { 705 struct ahci_controller *ctlr = device_get_softc(device_get_parent(dev)); 706 struct ahci_channel *ch = device_get_softc(dev); 707 struct cam_devq *devq; 708 struct sysctl_ctx_list *ctx; 709 struct sysctl_oid *tree; 710 int rid, error, i, sata_rev = 0; 711 u_int32_t version; 712 713 ch->dev = dev; 714 ch->unit = (intptr_t)device_get_ivars(dev); 715 ch->caps = ctlr->caps; 716 ch->caps2 = ctlr->caps2; 717 ch->start = ctlr->ch_start; 718 ch->quirks = ctlr->quirks; 719 ch->vendorid = ctlr->vendorid; 720 ch->deviceid = ctlr->deviceid; 721 ch->subvendorid = ctlr->subvendorid; 722 ch->subdeviceid = ctlr->subdeviceid; 723 ch->numslots = ((ch->caps & AHCI_CAP_NCS) >> AHCI_CAP_NCS_SHIFT) + 1; 724 mtx_init(&ch->mtx, "AHCI channel lock", NULL, MTX_DEF); 725 ch->pm_level = 0; 726 resource_int_value(device_get_name(dev), 727 device_get_unit(dev), "pm_level", &ch->pm_level); 728 STAILQ_INIT(&ch->doneq); 729 if (ch->pm_level > 3) 730 callout_init_mtx(&ch->pm_timer, &ch->mtx, 0); 731 callout_init_mtx(&ch->reset_timer, &ch->mtx, 0); 732 /* JMicron external ports (0) sometimes limited */ 733 if ((ctlr->quirks & AHCI_Q_SATA1_UNIT0) && ch->unit == 0) 734 sata_rev = 1; 735 if (ch->quirks & AHCI_Q_SATA2) 736 sata_rev = 2; 737 resource_int_value(device_get_name(dev), 738 device_get_unit(dev), "sata_rev", &sata_rev); 739 for (i = 0; i < 16; i++) { 740 ch->user[i].revision = sata_rev; 741 ch->user[i].mode = 0; 742 ch->user[i].bytecount = 8192; 743 ch->user[i].tags = ch->numslots; 744 ch->user[i].caps = 0; 745 ch->curr[i] = ch->user[i]; 746 if (ch->pm_level) { 747 ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ | 748 CTS_SATA_CAPS_H_APST | 749 CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST; 750 } 751 ch->user[i].caps |= CTS_SATA_CAPS_H_DMAAA | 752 CTS_SATA_CAPS_H_AN; 753 } 754 rid = 0; 755 if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, 756 &rid, RF_ACTIVE))) 757 return (ENXIO); 758 ch->chcaps = ATA_INL(ch->r_mem, AHCI_P_CMD); 759 version = ATA_INL(ctlr->r_mem, AHCI_VS); 760 if (version < 0x00010200 && (ctlr->caps & AHCI_CAP_FBSS)) 761 ch->chcaps |= AHCI_P_CMD_FBSCP; 762 if (ch->caps2 & AHCI_CAP2_SDS) 763 ch->chscaps = ATA_INL(ch->r_mem, AHCI_P_DEVSLP); 764 if (bootverbose) { 765 device_printf(dev, "Caps:%s%s%s%s%s%s\n", 766 (ch->chcaps & AHCI_P_CMD_HPCP) ? " HPCP":"", 767 (ch->chcaps & AHCI_P_CMD_MPSP) ? " MPSP":"", 768 (ch->chcaps & AHCI_P_CMD_CPD) ? " CPD":"", 769 (ch->chcaps & AHCI_P_CMD_ESP) ? " ESP":"", 770 (ch->chcaps & AHCI_P_CMD_FBSCP) ? " FBSCP":"", 771 (ch->chscaps & AHCI_P_DEVSLP_DSP) ? " DSP":""); 772 } 773 ahci_dmainit(dev); 774 ahci_slotsalloc(dev); 775 mtx_lock(&ch->mtx); 776 ahci_ch_init(dev); 777 rid = ATA_IRQ_RID; 778 if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, 779 &rid, RF_SHAREABLE | RF_ACTIVE))) { 780 device_printf(dev, "Unable to map interrupt\n"); 781 error = ENXIO; 782 goto err0; 783 } 784 if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL, 785 ctlr->direct ? ahci_ch_intr_direct : ahci_ch_intr, 786 ch, &ch->ih))) { 787 device_printf(dev, "Unable to setup interrupt\n"); 788 error = ENXIO; 789 goto err1; 790 } 791 /* Create the device queue for our SIM. */ 792 devq = cam_simq_alloc(ch->numslots); 793 if (devq == NULL) { 794 device_printf(dev, "Unable to allocate simq\n"); 795 error = ENOMEM; 796 goto err1; 797 } 798 /* Construct SIM entry */ 799 ch->sim = cam_sim_alloc(ahciaction, ahcipoll, "ahcich", ch, 800 device_get_unit(dev), (struct mtx *)&ch->mtx, 801 (ch->quirks & AHCI_Q_NOCCS) ? 1 : min(2, ch->numslots), 802 (ch->caps & AHCI_CAP_SNCQ) ? ch->numslots : 0, 803 devq); 804 if (ch->sim == NULL) { 805 cam_simq_free(devq); 806 device_printf(dev, "unable to allocate sim\n"); 807 error = ENOMEM; 808 goto err1; 809 } 810 if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) { 811 device_printf(dev, "unable to register xpt bus\n"); 812 error = ENXIO; 813 goto err2; 814 } 815 if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim), 816 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 817 device_printf(dev, "unable to create path\n"); 818 error = ENXIO; 819 goto err3; 820 } 821 if (ch->pm_level > 3) { 822 callout_reset(&ch->pm_timer, 823 (ch->pm_level == 4) ? hz / 1000 : hz / 8, 824 ahci_ch_pm, ch); 825 } 826 mtx_unlock(&ch->mtx); 827 ctx = device_get_sysctl_ctx(dev); 828 tree = device_get_sysctl_tree(dev); 829 SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "disable_phy", 830 CTLFLAG_RW | CTLTYPE_UINT, ch, 0, ahci_ch_disablephy_proc, "IU", 831 "Disable PHY"); 832 return (0); 833 834 err3: 835 xpt_bus_deregister(cam_sim_path(ch->sim)); 836 err2: 837 cam_sim_free(ch->sim, /*free_devq*/TRUE); 838 err1: 839 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 840 err0: 841 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); 842 mtx_unlock(&ch->mtx); 843 mtx_destroy(&ch->mtx); 844 return (error); 845 } 846 847 static int 848 ahci_ch_detach(device_t dev) 849 { 850 struct ahci_channel *ch = device_get_softc(dev); 851 852 mtx_lock(&ch->mtx); 853 xpt_async(AC_LOST_DEVICE, ch->path, NULL); 854 /* Forget about reset. */ 855 if (ch->resetting) { 856 ch->resetting = 0; 857 xpt_release_simq(ch->sim, TRUE); 858 } 859 xpt_free_path(ch->path); 860 xpt_bus_deregister(cam_sim_path(ch->sim)); 861 cam_sim_free(ch->sim, /*free_devq*/TRUE); 862 mtx_unlock(&ch->mtx); 863 864 if (ch->pm_level > 3) 865 callout_drain(&ch->pm_timer); 866 callout_drain(&ch->reset_timer); 867 bus_teardown_intr(dev, ch->r_irq, ch->ih); 868 bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq); 869 870 ahci_ch_deinit(dev); 871 ahci_slotsfree(dev); 872 ahci_dmafini(dev); 873 874 bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem); 875 mtx_destroy(&ch->mtx); 876 return (0); 877 } 878 879 static int 880 ahci_ch_init(device_t dev) 881 { 882 struct ahci_channel *ch = device_get_softc(dev); 883 uint64_t work; 884 885 /* Disable port interrupts */ 886 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 887 /* Setup work areas */ 888 work = ch->dma.work_bus + AHCI_CL_OFFSET; 889 ATA_OUTL(ch->r_mem, AHCI_P_CLB, work & 0xffffffff); 890 ATA_OUTL(ch->r_mem, AHCI_P_CLBU, work >> 32); 891 work = ch->dma.rfis_bus; 892 ATA_OUTL(ch->r_mem, AHCI_P_FB, work & 0xffffffff); 893 ATA_OUTL(ch->r_mem, AHCI_P_FBU, work >> 32); 894 /* Activate the channel and power/spin up device */ 895 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 896 (AHCI_P_CMD_ACTIVE | AHCI_P_CMD_POD | AHCI_P_CMD_SUD | 897 ((ch->pm_level == 2 || ch->pm_level == 3) ? AHCI_P_CMD_ALPE : 0) | 898 ((ch->pm_level > 2) ? AHCI_P_CMD_ASP : 0 ))); 899 ahci_start_fr(ch); 900 ahci_start(ch, 1); 901 return (0); 902 } 903 904 static int 905 ahci_ch_deinit(device_t dev) 906 { 907 struct ahci_channel *ch = device_get_softc(dev); 908 909 /* Disable port interrupts. */ 910 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 911 /* Reset command register. */ 912 ahci_stop(ch); 913 ahci_stop_fr(ch); 914 ATA_OUTL(ch->r_mem, AHCI_P_CMD, 0); 915 /* Allow everything, including partial and slumber modes. */ 916 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 0); 917 /* Request slumber mode transition and give some time to get there. */ 918 ATA_OUTL(ch->r_mem, AHCI_P_CMD, AHCI_P_CMD_SLUMBER); 919 DELAY(100); 920 /* Disable PHY. */ 921 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); 922 return (0); 923 } 924 925 static int 926 ahci_ch_suspend(device_t dev) 927 { 928 struct ahci_channel *ch = device_get_softc(dev); 929 930 mtx_lock(&ch->mtx); 931 xpt_freeze_simq(ch->sim, 1); 932 /* Forget about reset. */ 933 if (ch->resetting) { 934 ch->resetting = 0; 935 callout_stop(&ch->reset_timer); 936 xpt_release_simq(ch->sim, TRUE); 937 } 938 while (ch->oslots) 939 msleep(ch, &ch->mtx, PRIBIO, "ahcisusp", hz/100); 940 ahci_ch_deinit(dev); 941 mtx_unlock(&ch->mtx); 942 return (0); 943 } 944 945 static int 946 ahci_ch_resume(device_t dev) 947 { 948 struct ahci_channel *ch = device_get_softc(dev); 949 950 mtx_lock(&ch->mtx); 951 ahci_ch_init(dev); 952 ahci_reset(ch); 953 xpt_release_simq(ch->sim, TRUE); 954 mtx_unlock(&ch->mtx); 955 return (0); 956 } 957 958 devclass_t ahcich_devclass; 959 static device_method_t ahcich_methods[] = { 960 DEVMETHOD(device_probe, ahci_ch_probe), 961 DEVMETHOD(device_attach, ahci_ch_attach), 962 DEVMETHOD(device_detach, ahci_ch_detach), 963 DEVMETHOD(device_suspend, ahci_ch_suspend), 964 DEVMETHOD(device_resume, ahci_ch_resume), 965 DEVMETHOD_END 966 }; 967 static driver_t ahcich_driver = { 968 "ahcich", 969 ahcich_methods, 970 sizeof(struct ahci_channel) 971 }; 972 DRIVER_MODULE(ahcich, ahci, ahcich_driver, ahcich_devclass, NULL, NULL); 973 974 struct ahci_dc_cb_args { 975 bus_addr_t maddr; 976 int error; 977 }; 978 979 static void 980 ahci_dmainit(device_t dev) 981 { 982 struct ahci_channel *ch = device_get_softc(dev); 983 struct ahci_dc_cb_args dcba; 984 size_t rfsize; 985 int error; 986 987 /* Command area. */ 988 error = bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0, 989 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 990 NULL, NULL, AHCI_WORK_SIZE, 1, AHCI_WORK_SIZE, 991 0, NULL, NULL, &ch->dma.work_tag); 992 if (error != 0) 993 goto error; 994 error = bus_dmamem_alloc(ch->dma.work_tag, (void **)&ch->dma.work, 995 BUS_DMA_ZERO, &ch->dma.work_map); 996 if (error != 0) 997 goto error; 998 error = bus_dmamap_load(ch->dma.work_tag, ch->dma.work_map, ch->dma.work, 999 AHCI_WORK_SIZE, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT); 1000 if (error != 0 || (error = dcba.error) != 0) { 1001 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); 1002 goto error; 1003 } 1004 ch->dma.work_bus = dcba.maddr; 1005 /* FIS receive area. */ 1006 if (ch->chcaps & AHCI_P_CMD_FBSCP) 1007 rfsize = 4096; 1008 else 1009 rfsize = 256; 1010 error = bus_dma_tag_create(bus_get_dma_tag(dev), rfsize, 0, 1011 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1012 NULL, NULL, rfsize, 1, rfsize, 1013 0, NULL, NULL, &ch->dma.rfis_tag); 1014 if (error != 0) 1015 goto error; 1016 error = bus_dmamem_alloc(ch->dma.rfis_tag, (void **)&ch->dma.rfis, 0, 1017 &ch->dma.rfis_map); 1018 if (error != 0) 1019 goto error; 1020 error = bus_dmamap_load(ch->dma.rfis_tag, ch->dma.rfis_map, ch->dma.rfis, 1021 rfsize, ahci_dmasetupc_cb, &dcba, BUS_DMA_NOWAIT); 1022 if (error != 0 || (error = dcba.error) != 0) { 1023 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); 1024 goto error; 1025 } 1026 ch->dma.rfis_bus = dcba.maddr; 1027 /* Data area. */ 1028 error = bus_dma_tag_create(bus_get_dma_tag(dev), 2, 0, 1029 BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, 1030 NULL, NULL, 1031 AHCI_SG_ENTRIES * PAGE_SIZE * ch->numslots, 1032 AHCI_SG_ENTRIES, AHCI_PRD_MAX, 1033 0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag); 1034 if (error != 0) 1035 goto error; 1036 return; 1037 1038 error: 1039 device_printf(dev, "WARNING - DMA initialization failed, error %d\n", 1040 error); 1041 ahci_dmafini(dev); 1042 } 1043 1044 static void 1045 ahci_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error) 1046 { 1047 struct ahci_dc_cb_args *dcba = (struct ahci_dc_cb_args *)xsc; 1048 1049 if (!(dcba->error = error)) 1050 dcba->maddr = segs[0].ds_addr; 1051 } 1052 1053 static void 1054 ahci_dmafini(device_t dev) 1055 { 1056 struct ahci_channel *ch = device_get_softc(dev); 1057 1058 if (ch->dma.data_tag) { 1059 bus_dma_tag_destroy(ch->dma.data_tag); 1060 ch->dma.data_tag = NULL; 1061 } 1062 if (ch->dma.rfis_bus) { 1063 bus_dmamap_unload(ch->dma.rfis_tag, ch->dma.rfis_map); 1064 bus_dmamem_free(ch->dma.rfis_tag, ch->dma.rfis, ch->dma.rfis_map); 1065 ch->dma.rfis_bus = 0; 1066 ch->dma.rfis = NULL; 1067 } 1068 if (ch->dma.work_bus) { 1069 bus_dmamap_unload(ch->dma.work_tag, ch->dma.work_map); 1070 bus_dmamem_free(ch->dma.work_tag, ch->dma.work, ch->dma.work_map); 1071 ch->dma.work_bus = 0; 1072 ch->dma.work = NULL; 1073 } 1074 if (ch->dma.work_tag) { 1075 bus_dma_tag_destroy(ch->dma.work_tag); 1076 ch->dma.work_tag = NULL; 1077 } 1078 } 1079 1080 static void 1081 ahci_slotsalloc(device_t dev) 1082 { 1083 struct ahci_channel *ch = device_get_softc(dev); 1084 int i; 1085 1086 /* Alloc and setup command/dma slots */ 1087 bzero(ch->slot, sizeof(ch->slot)); 1088 for (i = 0; i < ch->numslots; i++) { 1089 struct ahci_slot *slot = &ch->slot[i]; 1090 1091 slot->ch = ch; 1092 slot->slot = i; 1093 slot->state = AHCI_SLOT_EMPTY; 1094 slot->ccb = NULL; 1095 callout_init_mtx(&slot->timeout, &ch->mtx, 0); 1096 1097 if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map)) 1098 device_printf(ch->dev, "FAILURE - create data_map\n"); 1099 } 1100 } 1101 1102 static void 1103 ahci_slotsfree(device_t dev) 1104 { 1105 struct ahci_channel *ch = device_get_softc(dev); 1106 int i; 1107 1108 /* Free all dma slots */ 1109 for (i = 0; i < ch->numslots; i++) { 1110 struct ahci_slot *slot = &ch->slot[i]; 1111 1112 callout_drain(&slot->timeout); 1113 if (slot->dma.data_map) { 1114 bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map); 1115 slot->dma.data_map = NULL; 1116 } 1117 } 1118 } 1119 1120 static int 1121 ahci_phy_check_events(struct ahci_channel *ch, u_int32_t serr) 1122 { 1123 1124 if (((ch->pm_level == 0) && (serr & ATA_SE_PHY_CHANGED)) || 1125 ((ch->pm_level != 0 || ch->listening) && (serr & ATA_SE_EXCHANGED))) { 1126 u_int32_t status = ATA_INL(ch->r_mem, AHCI_P_SSTS); 1127 union ccb *ccb; 1128 1129 if (bootverbose) { 1130 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) 1131 device_printf(ch->dev, "CONNECT requested\n"); 1132 else 1133 device_printf(ch->dev, "DISCONNECT requested\n"); 1134 } 1135 ahci_reset(ch); 1136 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 1137 return (0); 1138 if (xpt_create_path(&ccb->ccb_h.path, NULL, 1139 cam_sim_path(ch->sim), 1140 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1141 xpt_free_ccb(ccb); 1142 return (0); 1143 } 1144 xpt_rescan(ccb); 1145 return (1); 1146 } 1147 return (0); 1148 } 1149 1150 static void 1151 ahci_cpd_check_events(struct ahci_channel *ch) 1152 { 1153 u_int32_t status; 1154 union ccb *ccb; 1155 device_t dev; 1156 1157 if (ch->pm_level == 0) 1158 return; 1159 1160 status = ATA_INL(ch->r_mem, AHCI_P_CMD); 1161 if ((status & AHCI_P_CMD_CPD) == 0) 1162 return; 1163 1164 if (bootverbose) { 1165 dev = ch->dev; 1166 if (status & AHCI_P_CMD_CPS) { 1167 device_printf(dev, "COLD CONNECT requested\n"); 1168 } else 1169 device_printf(dev, "COLD DISCONNECT requested\n"); 1170 } 1171 ahci_reset(ch); 1172 if ((ccb = xpt_alloc_ccb_nowait()) == NULL) 1173 return; 1174 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(ch->sim), 1175 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) { 1176 xpt_free_ccb(ccb); 1177 return; 1178 } 1179 xpt_rescan(ccb); 1180 } 1181 1182 static void 1183 ahci_notify_events(struct ahci_channel *ch, u_int32_t status) 1184 { 1185 struct cam_path *dpath; 1186 int i; 1187 1188 if (ch->caps & AHCI_CAP_SSNTF) 1189 ATA_OUTL(ch->r_mem, AHCI_P_SNTF, status); 1190 if (bootverbose) 1191 device_printf(ch->dev, "SNTF 0x%04x\n", status); 1192 for (i = 0; i < 16; i++) { 1193 if ((status & (1 << i)) == 0) 1194 continue; 1195 if (xpt_create_path(&dpath, NULL, 1196 xpt_path_path_id(ch->path), i, 0) == CAM_REQ_CMP) { 1197 xpt_async(AC_SCSI_AEN, dpath, NULL); 1198 xpt_free_path(dpath); 1199 } 1200 } 1201 } 1202 1203 static void 1204 ahci_done(struct ahci_channel *ch, union ccb *ccb) 1205 { 1206 1207 mtx_assert(&ch->mtx, MA_OWNED); 1208 if ((ccb->ccb_h.func_code & XPT_FC_QUEUED) == 0 || 1209 ch->batch == 0) { 1210 xpt_done(ccb); 1211 return; 1212 } 1213 1214 STAILQ_INSERT_TAIL(&ch->doneq, &ccb->ccb_h, sim_links.stqe); 1215 } 1216 1217 static void 1218 ahci_ch_intr(void *arg) 1219 { 1220 struct ahci_channel *ch = (struct ahci_channel *)arg; 1221 uint32_t istatus; 1222 1223 /* Read interrupt statuses. */ 1224 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 1225 1226 mtx_lock(&ch->mtx); 1227 ahci_ch_intr_main(ch, istatus); 1228 mtx_unlock(&ch->mtx); 1229 } 1230 1231 static void 1232 ahci_ch_intr_direct(void *arg) 1233 { 1234 struct ahci_channel *ch = (struct ahci_channel *)arg; 1235 struct ccb_hdr *ccb_h; 1236 uint32_t istatus; 1237 STAILQ_HEAD(, ccb_hdr) tmp_doneq = STAILQ_HEAD_INITIALIZER(tmp_doneq); 1238 1239 /* Read interrupt statuses. */ 1240 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 1241 1242 mtx_lock(&ch->mtx); 1243 ch->batch = 1; 1244 ahci_ch_intr_main(ch, istatus); 1245 ch->batch = 0; 1246 /* 1247 * Prevent the possibility of issues caused by processing the queue 1248 * while unlocked below by moving the contents to a local queue. 1249 */ 1250 STAILQ_CONCAT(&tmp_doneq, &ch->doneq); 1251 mtx_unlock(&ch->mtx); 1252 while ((ccb_h = STAILQ_FIRST(&tmp_doneq)) != NULL) { 1253 STAILQ_REMOVE_HEAD(&tmp_doneq, sim_links.stqe); 1254 xpt_done_direct((union ccb *)ccb_h); 1255 } 1256 } 1257 1258 static void 1259 ahci_ch_pm(void *arg) 1260 { 1261 struct ahci_channel *ch = (struct ahci_channel *)arg; 1262 uint32_t work; 1263 1264 if (ch->numrslots != 0) 1265 return; 1266 work = ATA_INL(ch->r_mem, AHCI_P_CMD); 1267 if (ch->pm_level == 4) 1268 work |= AHCI_P_CMD_PARTIAL; 1269 else 1270 work |= AHCI_P_CMD_SLUMBER; 1271 ATA_OUTL(ch->r_mem, AHCI_P_CMD, work); 1272 } 1273 1274 static void 1275 ahci_ch_intr_main(struct ahci_channel *ch, uint32_t istatus) 1276 { 1277 uint32_t cstatus, serr = 0, sntf = 0, ok, err; 1278 enum ahci_err_type et; 1279 int i, ccs, port, reset = 0; 1280 1281 /* Clear interrupt statuses. */ 1282 ATA_OUTL(ch->r_mem, AHCI_P_IS, istatus); 1283 /* Read command statuses. */ 1284 if (ch->numtslots != 0) 1285 cstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); 1286 else 1287 cstatus = 0; 1288 if (ch->numrslots != ch->numtslots) 1289 cstatus |= ATA_INL(ch->r_mem, AHCI_P_CI); 1290 /* Read SNTF in one of possible ways. */ 1291 if ((istatus & AHCI_P_IX_SDB) && 1292 (ch->pm_present || ch->curr[0].atapi != 0)) { 1293 if (ch->caps & AHCI_CAP_SSNTF) 1294 sntf = ATA_INL(ch->r_mem, AHCI_P_SNTF); 1295 else if (ch->fbs_enabled) { 1296 u_int8_t *fis = ch->dma.rfis + 0x58; 1297 1298 for (i = 0; i < 16; i++) { 1299 if (fis[1] & 0x80) { 1300 fis[1] &= 0x7f; 1301 sntf |= 1 << i; 1302 } 1303 fis += 256; 1304 } 1305 } else { 1306 u_int8_t *fis = ch->dma.rfis + 0x58; 1307 1308 if (fis[1] & 0x80) 1309 sntf = (1 << (fis[1] & 0x0f)); 1310 } 1311 } 1312 /* Process PHY events */ 1313 if (istatus & (AHCI_P_IX_PC | AHCI_P_IX_PRC | AHCI_P_IX_OF | 1314 AHCI_P_IX_IF | AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { 1315 serr = ATA_INL(ch->r_mem, AHCI_P_SERR); 1316 if (serr) { 1317 ATA_OUTL(ch->r_mem, AHCI_P_SERR, serr); 1318 reset = ahci_phy_check_events(ch, serr); 1319 } 1320 } 1321 /* Process cold presence detection events */ 1322 if ((istatus & AHCI_P_IX_CPD) && !reset) 1323 ahci_cpd_check_events(ch); 1324 /* Process command errors */ 1325 if (istatus & (AHCI_P_IX_OF | AHCI_P_IX_IF | 1326 AHCI_P_IX_HBD | AHCI_P_IX_HBF | AHCI_P_IX_TFE)) { 1327 if (ch->quirks & AHCI_Q_NOCCS) { 1328 /* 1329 * ASMedia chips sometimes report failed commands as 1330 * completed. Count all running commands as failed. 1331 */ 1332 cstatus |= ch->rslots; 1333 1334 /* They also report wrong CCS, so try to guess one. */ 1335 ccs = powerof2(cstatus) ? ffs(cstatus) - 1 : -1; 1336 } else { 1337 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & 1338 AHCI_P_CMD_CCS_MASK) >> AHCI_P_CMD_CCS_SHIFT; 1339 } 1340 //device_printf(dev, "%s ERROR is %08x cs %08x ss %08x rs %08x tfd %02x serr %08x fbs %08x ccs %d\n", 1341 // __func__, istatus, cstatus, sstatus, ch->rslots, ATA_INL(ch->r_mem, AHCI_P_TFD), 1342 // serr, ATA_INL(ch->r_mem, AHCI_P_FBS), ccs); 1343 port = -1; 1344 if (ch->fbs_enabled) { 1345 uint32_t fbs = ATA_INL(ch->r_mem, AHCI_P_FBS); 1346 if (fbs & AHCI_P_FBS_SDE) { 1347 port = (fbs & AHCI_P_FBS_DWE) 1348 >> AHCI_P_FBS_DWE_SHIFT; 1349 } else { 1350 for (i = 0; i < 16; i++) { 1351 if (ch->numrslotspd[i] == 0) 1352 continue; 1353 if (port == -1) 1354 port = i; 1355 else if (port != i) { 1356 port = -2; 1357 break; 1358 } 1359 } 1360 } 1361 } 1362 err = ch->rslots & cstatus; 1363 } else { 1364 ccs = 0; 1365 err = 0; 1366 port = -1; 1367 } 1368 /* Complete all successful commands. */ 1369 ok = ch->rslots & ~cstatus; 1370 for (i = 0; i < ch->numslots; i++) { 1371 if ((ok >> i) & 1) 1372 ahci_end_transaction(&ch->slot[i], AHCI_ERR_NONE); 1373 } 1374 /* On error, complete the rest of commands with error statuses. */ 1375 if (err) { 1376 if (ch->frozen) { 1377 union ccb *fccb = ch->frozen; 1378 ch->frozen = NULL; 1379 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; 1380 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { 1381 xpt_freeze_devq(fccb->ccb_h.path, 1); 1382 fccb->ccb_h.status |= CAM_DEV_QFRZN; 1383 } 1384 ahci_done(ch, fccb); 1385 } 1386 for (i = 0; i < ch->numslots; i++) { 1387 /* XXX: reqests in loading state. */ 1388 if (((err >> i) & 1) == 0) 1389 continue; 1390 if (port >= 0 && 1391 ch->slot[i].ccb->ccb_h.target_id != port) 1392 continue; 1393 if (istatus & AHCI_P_IX_TFE) { 1394 if (port != -2) { 1395 /* Task File Error */ 1396 if (ch->numtslotspd[ 1397 ch->slot[i].ccb->ccb_h.target_id] == 0) { 1398 /* Untagged operation. */ 1399 if (i == ccs) 1400 et = AHCI_ERR_TFE; 1401 else 1402 et = AHCI_ERR_INNOCENT; 1403 } else { 1404 /* Tagged operation. */ 1405 et = AHCI_ERR_NCQ; 1406 } 1407 } else { 1408 et = AHCI_ERR_TFE; 1409 ch->fatalerr = 1; 1410 } 1411 } else if (istatus & AHCI_P_IX_IF) { 1412 if (ch->numtslots == 0 && i != ccs && port != -2) 1413 et = AHCI_ERR_INNOCENT; 1414 else 1415 et = AHCI_ERR_SATA; 1416 } else 1417 et = AHCI_ERR_INVALID; 1418 ahci_end_transaction(&ch->slot[i], et); 1419 } 1420 /* 1421 * We can't reinit port if there are some other 1422 * commands active, use resume to complete them. 1423 */ 1424 if (ch->rslots != 0 && !ch->recoverycmd) 1425 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | AHCI_P_FBS_DEC); 1426 } 1427 /* Process NOTIFY events */ 1428 if (sntf) 1429 ahci_notify_events(ch, sntf); 1430 } 1431 1432 /* Must be called with channel locked. */ 1433 static int 1434 ahci_check_collision(struct ahci_channel *ch, union ccb *ccb) 1435 { 1436 int t = ccb->ccb_h.target_id; 1437 1438 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1439 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1440 /* Tagged command while we have no supported tag free. */ 1441 if (((~ch->oslots) & (0xffffffff >> (32 - 1442 ch->curr[t].tags))) == 0) 1443 return (1); 1444 /* If we have FBS */ 1445 if (ch->fbs_enabled) { 1446 /* Tagged command while untagged are active. */ 1447 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] == 0) 1448 return (1); 1449 } else { 1450 /* Tagged command while untagged are active. */ 1451 if (ch->numrslots != 0 && ch->numtslots == 0) 1452 return (1); 1453 /* Tagged command while tagged to other target is active. */ 1454 if (ch->numtslots != 0 && 1455 ch->taggedtarget != ccb->ccb_h.target_id) 1456 return (1); 1457 } 1458 } else { 1459 /* If we have FBS */ 1460 if (ch->fbs_enabled) { 1461 /* Untagged command while tagged are active. */ 1462 if (ch->numrslotspd[t] != 0 && ch->numtslotspd[t] != 0) 1463 return (1); 1464 } else { 1465 /* Untagged command while tagged are active. */ 1466 if (ch->numrslots != 0 && ch->numtslots != 0) 1467 return (1); 1468 } 1469 } 1470 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1471 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) { 1472 /* Atomic command while anything active. */ 1473 if (ch->numrslots != 0) 1474 return (1); 1475 } 1476 /* We have some atomic command running. */ 1477 if (ch->aslots != 0) 1478 return (1); 1479 return (0); 1480 } 1481 1482 /* Must be called with channel locked. */ 1483 static void 1484 ahci_begin_transaction(struct ahci_channel *ch, union ccb *ccb) 1485 { 1486 struct ahci_slot *slot; 1487 int tag, tags; 1488 1489 /* Choose empty slot. */ 1490 tags = ch->numslots; 1491 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1492 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) 1493 tags = ch->curr[ccb->ccb_h.target_id].tags; 1494 if (ch->lastslot + 1 < tags) 1495 tag = ffs(~(ch->oslots >> (ch->lastslot + 1))); 1496 else 1497 tag = 0; 1498 if (tag == 0 || tag + ch->lastslot >= tags) 1499 tag = ffs(~ch->oslots) - 1; 1500 else 1501 tag += ch->lastslot; 1502 ch->lastslot = tag; 1503 /* Occupy chosen slot. */ 1504 slot = &ch->slot[tag]; 1505 slot->ccb = ccb; 1506 /* Stop PM timer. */ 1507 if (ch->numrslots == 0 && ch->pm_level > 3) 1508 callout_stop(&ch->pm_timer); 1509 /* Update channel stats. */ 1510 ch->oslots |= (1 << tag); 1511 ch->numrslots++; 1512 ch->numrslotspd[ccb->ccb_h.target_id]++; 1513 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1514 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1515 ch->numtslots++; 1516 ch->numtslotspd[ccb->ccb_h.target_id]++; 1517 ch->taggedtarget = ccb->ccb_h.target_id; 1518 } 1519 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1520 (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT))) 1521 ch->aslots |= (1 << tag); 1522 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1523 slot->state = AHCI_SLOT_LOADING; 1524 bus_dmamap_load_ccb(ch->dma.data_tag, slot->dma.data_map, ccb, 1525 ahci_dmasetprd, slot, 0); 1526 } else { 1527 slot->dma.nsegs = 0; 1528 ahci_execute_transaction(slot); 1529 } 1530 } 1531 1532 /* Locked by busdma engine. */ 1533 static void 1534 ahci_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error) 1535 { 1536 struct ahci_slot *slot = arg; 1537 struct ahci_channel *ch = slot->ch; 1538 struct ahci_cmd_tab *ctp; 1539 struct ahci_dma_prd *prd; 1540 int i; 1541 1542 if (error) { 1543 device_printf(ch->dev, "DMA load error\n"); 1544 ahci_end_transaction(slot, AHCI_ERR_INVALID); 1545 return; 1546 } 1547 KASSERT(nsegs <= AHCI_SG_ENTRIES, ("too many DMA segment entries\n")); 1548 /* Get a piece of the workspace for this request */ 1549 ctp = (struct ahci_cmd_tab *) 1550 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); 1551 /* Fill S/G table */ 1552 prd = &ctp->prd_tab[0]; 1553 for (i = 0; i < nsegs; i++) { 1554 prd[i].dba = htole64(segs[i].ds_addr); 1555 prd[i].dbc = htole32((segs[i].ds_len - 1) & AHCI_PRD_MASK); 1556 } 1557 slot->dma.nsegs = nsegs; 1558 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, 1559 ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ? 1560 BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE)); 1561 ahci_execute_transaction(slot); 1562 } 1563 1564 /* Must be called with channel locked. */ 1565 static void 1566 ahci_execute_transaction(struct ahci_slot *slot) 1567 { 1568 struct ahci_channel *ch = slot->ch; 1569 struct ahci_cmd_tab *ctp; 1570 struct ahci_cmd_list *clp; 1571 union ccb *ccb = slot->ccb; 1572 int port = ccb->ccb_h.target_id & 0x0f; 1573 int fis_size, i, softreset; 1574 uint8_t *fis = ch->dma.rfis + 0x40; 1575 uint8_t val; 1576 uint16_t cmd_flags; 1577 1578 /* Get a piece of the workspace for this request */ 1579 ctp = (struct ahci_cmd_tab *) 1580 (ch->dma.work + AHCI_CT_OFFSET + (AHCI_CT_SIZE * slot->slot)); 1581 /* Setup the FIS for this request */ 1582 if (!(fis_size = ahci_setup_fis(ch, ctp, ccb, slot->slot))) { 1583 device_printf(ch->dev, "Setting up SATA FIS failed\n"); 1584 ahci_end_transaction(slot, AHCI_ERR_INVALID); 1585 return; 1586 } 1587 /* Setup the command list entry */ 1588 clp = (struct ahci_cmd_list *) 1589 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); 1590 cmd_flags = 1591 (ccb->ccb_h.flags & CAM_DIR_OUT ? AHCI_CMD_WRITE : 0) | 1592 (ccb->ccb_h.func_code == XPT_SCSI_IO ? 1593 (AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH) : 0) | 1594 (fis_size / sizeof(u_int32_t)) | 1595 (port << 12); 1596 clp->prd_length = htole16(slot->dma.nsegs); 1597 /* Special handling for Soft Reset command. */ 1598 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1599 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) { 1600 if (ccb->ataio.cmd.control & ATA_A_RESET) { 1601 softreset = 1; 1602 /* Kick controller into sane state */ 1603 ahci_stop(ch); 1604 ahci_clo(ch); 1605 ahci_start(ch, 0); 1606 cmd_flags |= AHCI_CMD_RESET | AHCI_CMD_CLR_BUSY; 1607 } else { 1608 softreset = 2; 1609 /* Prepare FIS receive area for check. */ 1610 for (i = 0; i < 20; i++) 1611 fis[i] = 0xff; 1612 } 1613 } else 1614 softreset = 0; 1615 clp->bytecount = 0; 1616 clp->cmd_flags = htole16(cmd_flags); 1617 clp->cmd_table_phys = htole64(ch->dma.work_bus + AHCI_CT_OFFSET + 1618 (AHCI_CT_SIZE * slot->slot)); 1619 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, 1620 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1621 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, 1622 BUS_DMASYNC_PREREAD); 1623 /* Set ACTIVE bit for NCQ commands. */ 1624 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1625 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1626 ATA_OUTL(ch->r_mem, AHCI_P_SACT, 1 << slot->slot); 1627 } 1628 /* If FBS is enabled, set PMP port. */ 1629 if (ch->fbs_enabled) { 1630 ATA_OUTL(ch->r_mem, AHCI_P_FBS, AHCI_P_FBS_EN | 1631 (port << AHCI_P_FBS_DEV_SHIFT)); 1632 } 1633 /* Issue command to the controller. */ 1634 slot->state = AHCI_SLOT_RUNNING; 1635 ch->rslots |= (1 << slot->slot); 1636 ATA_OUTL(ch->r_mem, AHCI_P_CI, (1 << slot->slot)); 1637 /* Device reset commands doesn't interrupt. Poll them. */ 1638 if (ccb->ccb_h.func_code == XPT_ATA_IO && 1639 (ccb->ataio.cmd.command == ATA_DEVICE_RESET || softreset)) { 1640 int count, timeout = ccb->ccb_h.timeout * 100; 1641 enum ahci_err_type et = AHCI_ERR_NONE; 1642 1643 for (count = 0; count < timeout; count++) { 1644 DELAY(10); 1645 if (!(ATA_INL(ch->r_mem, AHCI_P_CI) & (1 << slot->slot))) 1646 break; 1647 if ((ATA_INL(ch->r_mem, AHCI_P_TFD) & ATA_S_ERROR) && 1648 softreset != 1) { 1649 #if 0 1650 device_printf(ch->dev, 1651 "Poll error on slot %d, TFD: %04x\n", 1652 slot->slot, ATA_INL(ch->r_mem, AHCI_P_TFD)); 1653 #endif 1654 et = AHCI_ERR_TFE; 1655 break; 1656 } 1657 /* Workaround for ATI SB600/SB700 chipsets. */ 1658 if (ccb->ccb_h.target_id == 15 && 1659 (ch->quirks & AHCI_Q_ATI_PMP_BUG) && 1660 (ATA_INL(ch->r_mem, AHCI_P_IS) & AHCI_P_IX_IPM)) { 1661 et = AHCI_ERR_TIMEOUT; 1662 break; 1663 } 1664 } 1665 1666 /* 1667 * Some Marvell controllers require additional time 1668 * after soft reset to work properly. Setup delay 1669 * to 50ms after soft reset. 1670 */ 1671 if (ch->quirks & AHCI_Q_MRVL_SR_DEL) 1672 DELAY(50000); 1673 1674 /* 1675 * Marvell HBAs with non-RAID firmware do not wait for 1676 * readiness after soft reset, so we have to wait here. 1677 * Marvell RAIDs do not have this problem, but instead 1678 * sometimes forget to update FIS receive area, breaking 1679 * this wait. 1680 */ 1681 if ((ch->quirks & AHCI_Q_NOBSYRES) == 0 && 1682 (ch->quirks & AHCI_Q_ATI_PMP_BUG) == 0 && 1683 softreset == 2 && et == AHCI_ERR_NONE) { 1684 for ( ; count < timeout; count++) { 1685 bus_dmamap_sync(ch->dma.rfis_tag, 1686 ch->dma.rfis_map, BUS_DMASYNC_POSTREAD); 1687 val = fis[2]; 1688 bus_dmamap_sync(ch->dma.rfis_tag, 1689 ch->dma.rfis_map, BUS_DMASYNC_PREREAD); 1690 if ((val & ATA_S_BUSY) == 0) 1691 break; 1692 DELAY(10); 1693 } 1694 } 1695 1696 if (timeout && (count >= timeout)) { 1697 device_printf(ch->dev, "Poll timeout on slot %d port %d\n", 1698 slot->slot, port); 1699 device_printf(ch->dev, "is %08x cs %08x ss %08x " 1700 "rs %08x tfd %02x serr %08x cmd %08x\n", 1701 ATA_INL(ch->r_mem, AHCI_P_IS), 1702 ATA_INL(ch->r_mem, AHCI_P_CI), 1703 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, 1704 ATA_INL(ch->r_mem, AHCI_P_TFD), 1705 ATA_INL(ch->r_mem, AHCI_P_SERR), 1706 ATA_INL(ch->r_mem, AHCI_P_CMD)); 1707 et = AHCI_ERR_TIMEOUT; 1708 } 1709 1710 /* Kick controller into sane state and enable FBS. */ 1711 if (softreset == 2) 1712 ch->eslots |= (1 << slot->slot); 1713 ahci_end_transaction(slot, et); 1714 return; 1715 } 1716 /* Start command execution timeout */ 1717 callout_reset_sbt(&slot->timeout, SBT_1MS * ccb->ccb_h.timeout / 2, 1718 0, (timeout_t*)ahci_timeout, slot, 0); 1719 return; 1720 } 1721 1722 /* Must be called with channel locked. */ 1723 static void 1724 ahci_process_timeout(struct ahci_channel *ch) 1725 { 1726 int i; 1727 1728 mtx_assert(&ch->mtx, MA_OWNED); 1729 /* Handle the rest of commands. */ 1730 for (i = 0; i < ch->numslots; i++) { 1731 /* Do we have a running request on slot? */ 1732 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 1733 continue; 1734 ahci_end_transaction(&ch->slot[i], AHCI_ERR_TIMEOUT); 1735 } 1736 } 1737 1738 /* Must be called with channel locked. */ 1739 static void 1740 ahci_rearm_timeout(struct ahci_channel *ch) 1741 { 1742 int i; 1743 1744 mtx_assert(&ch->mtx, MA_OWNED); 1745 for (i = 0; i < ch->numslots; i++) { 1746 struct ahci_slot *slot = &ch->slot[i]; 1747 1748 /* Do we have a running request on slot? */ 1749 if (slot->state < AHCI_SLOT_RUNNING) 1750 continue; 1751 if ((ch->toslots & (1 << i)) == 0) 1752 continue; 1753 callout_reset_sbt(&slot->timeout, 1754 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, 1755 (timeout_t*)ahci_timeout, slot, 0); 1756 } 1757 } 1758 1759 /* Locked by callout mechanism. */ 1760 static void 1761 ahci_timeout(struct ahci_slot *slot) 1762 { 1763 struct ahci_channel *ch = slot->ch; 1764 device_t dev = ch->dev; 1765 uint32_t sstatus; 1766 int ccs; 1767 int i; 1768 1769 /* Check for stale timeout. */ 1770 if (slot->state < AHCI_SLOT_RUNNING) 1771 return; 1772 1773 /* Check if slot was not being executed last time we checked. */ 1774 if (slot->state < AHCI_SLOT_EXECUTING) { 1775 /* Check if slot started executing. */ 1776 sstatus = ATA_INL(ch->r_mem, AHCI_P_SACT); 1777 ccs = (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CCS_MASK) 1778 >> AHCI_P_CMD_CCS_SHIFT; 1779 if ((sstatus & (1 << slot->slot)) != 0 || ccs == slot->slot || 1780 ch->fbs_enabled || ch->wrongccs) 1781 slot->state = AHCI_SLOT_EXECUTING; 1782 else if ((ch->rslots & (1 << ccs)) == 0) { 1783 ch->wrongccs = 1; 1784 slot->state = AHCI_SLOT_EXECUTING; 1785 } 1786 1787 callout_reset_sbt(&slot->timeout, 1788 SBT_1MS * slot->ccb->ccb_h.timeout / 2, 0, 1789 (timeout_t*)ahci_timeout, slot, 0); 1790 return; 1791 } 1792 1793 device_printf(dev, "Timeout on slot %d port %d\n", 1794 slot->slot, slot->ccb->ccb_h.target_id & 0x0f); 1795 device_printf(dev, "is %08x cs %08x ss %08x rs %08x tfd %02x " 1796 "serr %08x cmd %08x\n", 1797 ATA_INL(ch->r_mem, AHCI_P_IS), ATA_INL(ch->r_mem, AHCI_P_CI), 1798 ATA_INL(ch->r_mem, AHCI_P_SACT), ch->rslots, 1799 ATA_INL(ch->r_mem, AHCI_P_TFD), ATA_INL(ch->r_mem, AHCI_P_SERR), 1800 ATA_INL(ch->r_mem, AHCI_P_CMD)); 1801 1802 /* Handle frozen command. */ 1803 if (ch->frozen) { 1804 union ccb *fccb = ch->frozen; 1805 ch->frozen = NULL; 1806 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; 1807 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { 1808 xpt_freeze_devq(fccb->ccb_h.path, 1); 1809 fccb->ccb_h.status |= CAM_DEV_QFRZN; 1810 } 1811 ahci_done(ch, fccb); 1812 } 1813 if (!ch->fbs_enabled && !ch->wrongccs) { 1814 /* Without FBS we know real timeout source. */ 1815 ch->fatalerr = 1; 1816 /* Handle command with timeout. */ 1817 ahci_end_transaction(&ch->slot[slot->slot], AHCI_ERR_TIMEOUT); 1818 /* Handle the rest of commands. */ 1819 for (i = 0; i < ch->numslots; i++) { 1820 /* Do we have a running request on slot? */ 1821 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 1822 continue; 1823 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); 1824 } 1825 } else { 1826 /* With FBS we wait for other commands timeout and pray. */ 1827 if (ch->toslots == 0) 1828 xpt_freeze_simq(ch->sim, 1); 1829 ch->toslots |= (1 << slot->slot); 1830 if ((ch->rslots & ~ch->toslots) == 0) 1831 ahci_process_timeout(ch); 1832 else 1833 device_printf(dev, " ... waiting for slots %08x\n", 1834 ch->rslots & ~ch->toslots); 1835 } 1836 } 1837 1838 /* Must be called with channel locked. */ 1839 static void 1840 ahci_end_transaction(struct ahci_slot *slot, enum ahci_err_type et) 1841 { 1842 struct ahci_channel *ch = slot->ch; 1843 union ccb *ccb = slot->ccb; 1844 struct ahci_cmd_list *clp; 1845 int lastto; 1846 uint32_t sig; 1847 1848 bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map, 1849 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1850 clp = (struct ahci_cmd_list *) 1851 (ch->dma.work + AHCI_CL_OFFSET + (AHCI_CL_SIZE * slot->slot)); 1852 /* Read result registers to the result struct 1853 * May be incorrect if several commands finished same time, 1854 * so read only when sure or have to. 1855 */ 1856 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 1857 struct ata_res *res = &ccb->ataio.res; 1858 1859 if ((et == AHCI_ERR_TFE) || 1860 (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) { 1861 u_int8_t *fis = ch->dma.rfis + 0x40; 1862 1863 bus_dmamap_sync(ch->dma.rfis_tag, ch->dma.rfis_map, 1864 BUS_DMASYNC_POSTREAD); 1865 if (ch->fbs_enabled) { 1866 fis += ccb->ccb_h.target_id * 256; 1867 res->status = fis[2]; 1868 res->error = fis[3]; 1869 } else { 1870 uint16_t tfd = ATA_INL(ch->r_mem, AHCI_P_TFD); 1871 1872 res->status = tfd; 1873 res->error = tfd >> 8; 1874 } 1875 res->lba_low = fis[4]; 1876 res->lba_mid = fis[5]; 1877 res->lba_high = fis[6]; 1878 res->device = fis[7]; 1879 res->lba_low_exp = fis[8]; 1880 res->lba_mid_exp = fis[9]; 1881 res->lba_high_exp = fis[10]; 1882 res->sector_count = fis[12]; 1883 res->sector_count_exp = fis[13]; 1884 1885 /* 1886 * Some weird controllers do not return signature in 1887 * FIS receive area. Read it from PxSIG register. 1888 */ 1889 if ((ch->quirks & AHCI_Q_ALTSIG) && 1890 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1891 (ccb->ataio.cmd.control & ATA_A_RESET) == 0) { 1892 sig = ATA_INL(ch->r_mem, AHCI_P_SIG); 1893 res->lba_high = sig >> 24; 1894 res->lba_mid = sig >> 16; 1895 res->lba_low = sig >> 8; 1896 res->sector_count = sig; 1897 } 1898 } else 1899 bzero(res, sizeof(*res)); 1900 if ((ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) == 0 && 1901 (ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1902 (ch->quirks & AHCI_Q_NOCOUNT) == 0) { 1903 ccb->ataio.resid = 1904 ccb->ataio.dxfer_len - le32toh(clp->bytecount); 1905 } 1906 } else { 1907 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 1908 (ch->quirks & AHCI_Q_NOCOUNT) == 0) { 1909 ccb->csio.resid = 1910 ccb->csio.dxfer_len - le32toh(clp->bytecount); 1911 } 1912 } 1913 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) { 1914 bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map, 1915 (ccb->ccb_h.flags & CAM_DIR_IN) ? 1916 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE); 1917 bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map); 1918 } 1919 if (et != AHCI_ERR_NONE) 1920 ch->eslots |= (1 << slot->slot); 1921 /* In case of error, freeze device for proper recovery. */ 1922 if ((et != AHCI_ERR_NONE) && (!ch->recoverycmd) && 1923 !(ccb->ccb_h.status & CAM_DEV_QFRZN)) { 1924 xpt_freeze_devq(ccb->ccb_h.path, 1); 1925 ccb->ccb_h.status |= CAM_DEV_QFRZN; 1926 } 1927 /* Set proper result status. */ 1928 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1929 switch (et) { 1930 case AHCI_ERR_NONE: 1931 ccb->ccb_h.status |= CAM_REQ_CMP; 1932 if (ccb->ccb_h.func_code == XPT_SCSI_IO) 1933 ccb->csio.scsi_status = SCSI_STATUS_OK; 1934 break; 1935 case AHCI_ERR_INVALID: 1936 ch->fatalerr = 1; 1937 ccb->ccb_h.status |= CAM_REQ_INVALID; 1938 break; 1939 case AHCI_ERR_INNOCENT: 1940 ccb->ccb_h.status |= CAM_REQUEUE_REQ; 1941 break; 1942 case AHCI_ERR_TFE: 1943 case AHCI_ERR_NCQ: 1944 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 1945 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR; 1946 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND; 1947 } else { 1948 ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR; 1949 } 1950 break; 1951 case AHCI_ERR_SATA: 1952 ch->fatalerr = 1; 1953 if (!ch->recoverycmd) { 1954 xpt_freeze_simq(ch->sim, 1); 1955 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1956 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1957 } 1958 ccb->ccb_h.status |= CAM_UNCOR_PARITY; 1959 break; 1960 case AHCI_ERR_TIMEOUT: 1961 if (!ch->recoverycmd) { 1962 xpt_freeze_simq(ch->sim, 1); 1963 ccb->ccb_h.status &= ~CAM_STATUS_MASK; 1964 ccb->ccb_h.status |= CAM_RELEASE_SIMQ; 1965 } 1966 ccb->ccb_h.status |= CAM_CMD_TIMEOUT; 1967 break; 1968 default: 1969 ch->fatalerr = 1; 1970 ccb->ccb_h.status |= CAM_REQ_CMP_ERR; 1971 } 1972 /* Free slot. */ 1973 ch->oslots &= ~(1 << slot->slot); 1974 ch->rslots &= ~(1 << slot->slot); 1975 ch->aslots &= ~(1 << slot->slot); 1976 slot->state = AHCI_SLOT_EMPTY; 1977 slot->ccb = NULL; 1978 /* Update channel stats. */ 1979 ch->numrslots--; 1980 ch->numrslotspd[ccb->ccb_h.target_id]--; 1981 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1982 (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) { 1983 ch->numtslots--; 1984 ch->numtslotspd[ccb->ccb_h.target_id]--; 1985 } 1986 /* Cancel timeout state if request completed normally. */ 1987 if (et != AHCI_ERR_TIMEOUT) { 1988 lastto = (ch->toslots == (1 << slot->slot)); 1989 ch->toslots &= ~(1 << slot->slot); 1990 if (lastto) 1991 xpt_release_simq(ch->sim, TRUE); 1992 } 1993 /* If it was first request of reset sequence and there is no error, 1994 * proceed to second request. */ 1995 if ((ccb->ccb_h.func_code == XPT_ATA_IO) && 1996 (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) && 1997 (ccb->ataio.cmd.control & ATA_A_RESET) && 1998 et == AHCI_ERR_NONE) { 1999 ccb->ataio.cmd.control &= ~ATA_A_RESET; 2000 ahci_begin_transaction(ch, ccb); 2001 return; 2002 } 2003 /* If it was our READ LOG command - process it. */ 2004 if (ccb->ccb_h.recovery_type == RECOVERY_READ_LOG) { 2005 ahci_process_read_log(ch, ccb); 2006 /* If it was our REQUEST SENSE command - process it. */ 2007 } else if (ccb->ccb_h.recovery_type == RECOVERY_REQUEST_SENSE) { 2008 ahci_process_request_sense(ch, ccb); 2009 /* If it was NCQ or ATAPI command error, put result on hold. */ 2010 } else if (et == AHCI_ERR_NCQ || 2011 ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_SCSI_STATUS_ERROR && 2012 (ccb->ccb_h.flags & CAM_DIS_AUTOSENSE) == 0)) { 2013 ch->hold[slot->slot] = ccb; 2014 ch->numhslots++; 2015 } else 2016 ahci_done(ch, ccb); 2017 /* If we have no other active commands, ... */ 2018 if (ch->rslots == 0) { 2019 /* if there was fatal error - reset port. */ 2020 if (ch->toslots != 0 || ch->fatalerr) { 2021 ahci_reset(ch); 2022 } else { 2023 /* if we have slots in error, we can reinit port. */ 2024 if (ch->eslots != 0) { 2025 ahci_stop(ch); 2026 ahci_clo(ch); 2027 ahci_start(ch, 1); 2028 } 2029 /* if there commands on hold, we can do READ LOG. */ 2030 if (!ch->recoverycmd && ch->numhslots) 2031 ahci_issue_recovery(ch); 2032 } 2033 /* If all the rest of commands are in timeout - give them chance. */ 2034 } else if ((ch->rslots & ~ch->toslots) == 0 && 2035 et != AHCI_ERR_TIMEOUT) 2036 ahci_rearm_timeout(ch); 2037 /* Unfreeze frozen command. */ 2038 if (ch->frozen && !ahci_check_collision(ch, ch->frozen)) { 2039 union ccb *fccb = ch->frozen; 2040 ch->frozen = NULL; 2041 ahci_begin_transaction(ch, fccb); 2042 xpt_release_simq(ch->sim, TRUE); 2043 } 2044 /* Start PM timer. */ 2045 if (ch->numrslots == 0 && ch->pm_level > 3 && 2046 (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) { 2047 callout_schedule(&ch->pm_timer, 2048 (ch->pm_level == 4) ? hz / 1000 : hz / 8); 2049 } 2050 } 2051 2052 static void 2053 ahci_issue_recovery(struct ahci_channel *ch) 2054 { 2055 union ccb *ccb; 2056 struct ccb_ataio *ataio; 2057 struct ccb_scsiio *csio; 2058 int i; 2059 2060 /* Find some held command. */ 2061 for (i = 0; i < ch->numslots; i++) { 2062 if (ch->hold[i]) 2063 break; 2064 } 2065 ccb = xpt_alloc_ccb_nowait(); 2066 if (ccb == NULL) { 2067 device_printf(ch->dev, "Unable to allocate recovery command\n"); 2068 completeall: 2069 /* We can't do anything -- complete held commands. */ 2070 for (i = 0; i < ch->numslots; i++) { 2071 if (ch->hold[i] == NULL) 2072 continue; 2073 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2074 ch->hold[i]->ccb_h.status |= CAM_RESRC_UNAVAIL; 2075 ahci_done(ch, ch->hold[i]); 2076 ch->hold[i] = NULL; 2077 ch->numhslots--; 2078 } 2079 ahci_reset(ch); 2080 return; 2081 } 2082 ccb->ccb_h = ch->hold[i]->ccb_h; /* Reuse old header. */ 2083 if (ccb->ccb_h.func_code == XPT_ATA_IO) { 2084 /* READ LOG */ 2085 ccb->ccb_h.recovery_type = RECOVERY_READ_LOG; 2086 ccb->ccb_h.func_code = XPT_ATA_IO; 2087 ccb->ccb_h.flags = CAM_DIR_IN; 2088 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ 2089 ataio = &ccb->ataio; 2090 ataio->data_ptr = malloc(512, M_AHCI, M_NOWAIT); 2091 if (ataio->data_ptr == NULL) { 2092 xpt_free_ccb(ccb); 2093 device_printf(ch->dev, 2094 "Unable to allocate memory for READ LOG command\n"); 2095 goto completeall; 2096 } 2097 ataio->dxfer_len = 512; 2098 bzero(&ataio->cmd, sizeof(ataio->cmd)); 2099 ataio->cmd.flags = CAM_ATAIO_48BIT; 2100 ataio->cmd.command = 0x2F; /* READ LOG EXT */ 2101 ataio->cmd.sector_count = 1; 2102 ataio->cmd.sector_count_exp = 0; 2103 ataio->cmd.lba_low = 0x10; 2104 ataio->cmd.lba_mid = 0; 2105 ataio->cmd.lba_mid_exp = 0; 2106 } else { 2107 /* REQUEST SENSE */ 2108 ccb->ccb_h.recovery_type = RECOVERY_REQUEST_SENSE; 2109 ccb->ccb_h.recovery_slot = i; 2110 ccb->ccb_h.func_code = XPT_SCSI_IO; 2111 ccb->ccb_h.flags = CAM_DIR_IN; 2112 ccb->ccb_h.status = 0; 2113 ccb->ccb_h.timeout = 1000; /* 1s should be enough. */ 2114 csio = &ccb->csio; 2115 csio->data_ptr = (void *)&ch->hold[i]->csio.sense_data; 2116 csio->dxfer_len = ch->hold[i]->csio.sense_len; 2117 csio->cdb_len = 6; 2118 bzero(&csio->cdb_io, sizeof(csio->cdb_io)); 2119 csio->cdb_io.cdb_bytes[0] = 0x03; 2120 csio->cdb_io.cdb_bytes[4] = csio->dxfer_len; 2121 } 2122 /* Freeze SIM while doing recovery. */ 2123 ch->recoverycmd = 1; 2124 xpt_freeze_simq(ch->sim, 1); 2125 ahci_begin_transaction(ch, ccb); 2126 } 2127 2128 static void 2129 ahci_process_read_log(struct ahci_channel *ch, union ccb *ccb) 2130 { 2131 uint8_t *data; 2132 struct ata_res *res; 2133 int i; 2134 2135 ch->recoverycmd = 0; 2136 2137 data = ccb->ataio.data_ptr; 2138 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP && 2139 (data[0] & 0x80) == 0) { 2140 for (i = 0; i < ch->numslots; i++) { 2141 if (!ch->hold[i]) 2142 continue; 2143 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) 2144 continue; 2145 if ((data[0] & 0x1F) == i) { 2146 res = &ch->hold[i]->ataio.res; 2147 res->status = data[2]; 2148 res->error = data[3]; 2149 res->lba_low = data[4]; 2150 res->lba_mid = data[5]; 2151 res->lba_high = data[6]; 2152 res->device = data[7]; 2153 res->lba_low_exp = data[8]; 2154 res->lba_mid_exp = data[9]; 2155 res->lba_high_exp = data[10]; 2156 res->sector_count = data[12]; 2157 res->sector_count_exp = data[13]; 2158 } else { 2159 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2160 ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ; 2161 } 2162 ahci_done(ch, ch->hold[i]); 2163 ch->hold[i] = NULL; 2164 ch->numhslots--; 2165 } 2166 } else { 2167 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP) 2168 device_printf(ch->dev, "Error while READ LOG EXT\n"); 2169 else if ((data[0] & 0x80) == 0) { 2170 device_printf(ch->dev, "Non-queued command error in READ LOG EXT\n"); 2171 } 2172 for (i = 0; i < ch->numslots; i++) { 2173 if (!ch->hold[i]) 2174 continue; 2175 if (ch->hold[i]->ccb_h.func_code != XPT_ATA_IO) 2176 continue; 2177 ahci_done(ch, ch->hold[i]); 2178 ch->hold[i] = NULL; 2179 ch->numhslots--; 2180 } 2181 } 2182 free(ccb->ataio.data_ptr, M_AHCI); 2183 xpt_free_ccb(ccb); 2184 xpt_release_simq(ch->sim, TRUE); 2185 } 2186 2187 static void 2188 ahci_process_request_sense(struct ahci_channel *ch, union ccb *ccb) 2189 { 2190 int i; 2191 2192 ch->recoverycmd = 0; 2193 2194 i = ccb->ccb_h.recovery_slot; 2195 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP) { 2196 ch->hold[i]->ccb_h.status |= CAM_AUTOSNS_VALID; 2197 } else { 2198 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK; 2199 ch->hold[i]->ccb_h.status |= CAM_AUTOSENSE_FAIL; 2200 } 2201 ahci_done(ch, ch->hold[i]); 2202 ch->hold[i] = NULL; 2203 ch->numhslots--; 2204 xpt_free_ccb(ccb); 2205 xpt_release_simq(ch->sim, TRUE); 2206 } 2207 2208 static void 2209 ahci_start(struct ahci_channel *ch, int fbs) 2210 { 2211 u_int32_t cmd; 2212 2213 /* Run the channel start callback, if any. */ 2214 if (ch->start) 2215 ch->start(ch); 2216 2217 /* Clear SATA error register */ 2218 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xFFFFFFFF); 2219 /* Clear any interrupts pending on this channel */ 2220 ATA_OUTL(ch->r_mem, AHCI_P_IS, 0xFFFFFFFF); 2221 /* Configure FIS-based switching if supported. */ 2222 if (ch->chcaps & AHCI_P_CMD_FBSCP) { 2223 ch->fbs_enabled = (fbs && ch->pm_present) ? 1 : 0; 2224 ATA_OUTL(ch->r_mem, AHCI_P_FBS, 2225 ch->fbs_enabled ? AHCI_P_FBS_EN : 0); 2226 } 2227 /* Start operations on this channel */ 2228 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2229 cmd &= ~AHCI_P_CMD_PMA; 2230 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_ST | 2231 (ch->pm_present ? AHCI_P_CMD_PMA : 0)); 2232 } 2233 2234 static void 2235 ahci_stop(struct ahci_channel *ch) 2236 { 2237 u_int32_t cmd; 2238 int timeout; 2239 2240 /* Kill all activity on this channel */ 2241 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2242 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_ST); 2243 /* Wait for activity stop. */ 2244 timeout = 0; 2245 do { 2246 DELAY(10); 2247 if (timeout++ > 50000) { 2248 device_printf(ch->dev, "stopping AHCI engine failed\n"); 2249 break; 2250 } 2251 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CR); 2252 ch->eslots = 0; 2253 } 2254 2255 static void 2256 ahci_clo(struct ahci_channel *ch) 2257 { 2258 u_int32_t cmd; 2259 int timeout; 2260 2261 /* Issue Command List Override if supported */ 2262 if (ch->caps & AHCI_CAP_SCLO) { 2263 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2264 cmd |= AHCI_P_CMD_CLO; 2265 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd); 2266 timeout = 0; 2267 do { 2268 DELAY(10); 2269 if (timeout++ > 50000) { 2270 device_printf(ch->dev, "executing CLO failed\n"); 2271 break; 2272 } 2273 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_CLO); 2274 } 2275 } 2276 2277 static void 2278 ahci_stop_fr(struct ahci_channel *ch) 2279 { 2280 u_int32_t cmd; 2281 int timeout; 2282 2283 /* Kill all FIS reception on this channel */ 2284 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2285 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd & ~AHCI_P_CMD_FRE); 2286 /* Wait for FIS reception stop. */ 2287 timeout = 0; 2288 do { 2289 DELAY(10); 2290 if (timeout++ > 50000) { 2291 device_printf(ch->dev, "stopping AHCI FR engine failed\n"); 2292 break; 2293 } 2294 } while (ATA_INL(ch->r_mem, AHCI_P_CMD) & AHCI_P_CMD_FR); 2295 } 2296 2297 static void 2298 ahci_start_fr(struct ahci_channel *ch) 2299 { 2300 u_int32_t cmd; 2301 2302 /* Start FIS reception on this channel */ 2303 cmd = ATA_INL(ch->r_mem, AHCI_P_CMD); 2304 ATA_OUTL(ch->r_mem, AHCI_P_CMD, cmd | AHCI_P_CMD_FRE); 2305 } 2306 2307 static int 2308 ahci_wait_ready(struct ahci_channel *ch, int t, int t0) 2309 { 2310 int timeout = 0; 2311 uint32_t val; 2312 2313 while ((val = ATA_INL(ch->r_mem, AHCI_P_TFD)) & 2314 (ATA_S_BUSY | ATA_S_DRQ)) { 2315 if (timeout > t) { 2316 if (t != 0) { 2317 device_printf(ch->dev, 2318 "AHCI reset: device not ready after %dms " 2319 "(tfd = %08x)\n", 2320 MAX(t, 0) + t0, val); 2321 } 2322 return (EBUSY); 2323 } 2324 DELAY(1000); 2325 timeout++; 2326 } 2327 if (bootverbose) 2328 device_printf(ch->dev, "AHCI reset: device ready after %dms\n", 2329 timeout + t0); 2330 return (0); 2331 } 2332 2333 static void 2334 ahci_reset_to(void *arg) 2335 { 2336 struct ahci_channel *ch = arg; 2337 2338 if (ch->resetting == 0) 2339 return; 2340 ch->resetting--; 2341 if (ahci_wait_ready(ch, ch->resetting == 0 ? -1 : 0, 2342 (310 - ch->resetting) * 100) == 0) { 2343 ch->resetting = 0; 2344 ahci_start(ch, 1); 2345 xpt_release_simq(ch->sim, TRUE); 2346 return; 2347 } 2348 if (ch->resetting == 0) { 2349 ahci_clo(ch); 2350 ahci_start(ch, 1); 2351 xpt_release_simq(ch->sim, TRUE); 2352 return; 2353 } 2354 callout_schedule(&ch->reset_timer, hz / 10); 2355 } 2356 2357 static void 2358 ahci_reset(struct ahci_channel *ch) 2359 { 2360 struct ahci_controller *ctlr = device_get_softc(device_get_parent(ch->dev)); 2361 int i; 2362 2363 xpt_freeze_simq(ch->sim, 1); 2364 if (bootverbose) 2365 device_printf(ch->dev, "AHCI reset...\n"); 2366 /* Forget about previous reset. */ 2367 if (ch->resetting) { 2368 ch->resetting = 0; 2369 callout_stop(&ch->reset_timer); 2370 xpt_release_simq(ch->sim, TRUE); 2371 } 2372 /* Requeue freezed command. */ 2373 if (ch->frozen) { 2374 union ccb *fccb = ch->frozen; 2375 ch->frozen = NULL; 2376 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ; 2377 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) { 2378 xpt_freeze_devq(fccb->ccb_h.path, 1); 2379 fccb->ccb_h.status |= CAM_DEV_QFRZN; 2380 } 2381 ahci_done(ch, fccb); 2382 } 2383 /* Kill the engine and requeue all running commands. */ 2384 ahci_stop(ch); 2385 for (i = 0; i < ch->numslots; i++) { 2386 /* Do we have a running request on slot? */ 2387 if (ch->slot[i].state < AHCI_SLOT_RUNNING) 2388 continue; 2389 /* XXX; Commands in loading state. */ 2390 ahci_end_transaction(&ch->slot[i], AHCI_ERR_INNOCENT); 2391 } 2392 for (i = 0; i < ch->numslots; i++) { 2393 if (!ch->hold[i]) 2394 continue; 2395 ahci_done(ch, ch->hold[i]); 2396 ch->hold[i] = NULL; 2397 ch->numhslots--; 2398 } 2399 if (ch->toslots != 0) 2400 xpt_release_simq(ch->sim, TRUE); 2401 ch->eslots = 0; 2402 ch->toslots = 0; 2403 ch->wrongccs = 0; 2404 ch->fatalerr = 0; 2405 /* Tell the XPT about the event */ 2406 xpt_async(AC_BUS_RESET, ch->path, NULL); 2407 /* Disable port interrupts */ 2408 ATA_OUTL(ch->r_mem, AHCI_P_IE, 0); 2409 /* Reset and reconnect PHY, */ 2410 if (!ahci_sata_phy_reset(ch)) { 2411 if (bootverbose) 2412 device_printf(ch->dev, 2413 "AHCI reset: device not found\n"); 2414 ch->devices = 0; 2415 /* Enable wanted port interrupts */ 2416 ATA_OUTL(ch->r_mem, AHCI_P_IE, 2417 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | 2418 AHCI_P_IX_PRC | AHCI_P_IX_PC)); 2419 xpt_release_simq(ch->sim, TRUE); 2420 return; 2421 } 2422 if (bootverbose) 2423 device_printf(ch->dev, "AHCI reset: device found\n"); 2424 /* Wait for clearing busy status. */ 2425 if (ahci_wait_ready(ch, dumping ? 31000 : 0, 0)) { 2426 if (dumping) 2427 ahci_clo(ch); 2428 else 2429 ch->resetting = 310; 2430 } 2431 ch->devices = 1; 2432 /* Enable wanted port interrupts */ 2433 ATA_OUTL(ch->r_mem, AHCI_P_IE, 2434 (((ch->pm_level != 0) ? AHCI_P_IX_CPD | AHCI_P_IX_MP : 0) | 2435 AHCI_P_IX_TFE | AHCI_P_IX_HBF | 2436 AHCI_P_IX_HBD | AHCI_P_IX_IF | AHCI_P_IX_OF | 2437 ((ch->pm_level == 0) ? AHCI_P_IX_PRC : 0) | AHCI_P_IX_PC | 2438 AHCI_P_IX_DP | AHCI_P_IX_UF | (ctlr->ccc ? 0 : AHCI_P_IX_SDB) | 2439 AHCI_P_IX_DS | AHCI_P_IX_PS | (ctlr->ccc ? 0 : AHCI_P_IX_DHR))); 2440 if (ch->resetting) 2441 callout_reset(&ch->reset_timer, hz / 10, ahci_reset_to, ch); 2442 else { 2443 ahci_start(ch, 1); 2444 xpt_release_simq(ch->sim, TRUE); 2445 } 2446 } 2447 2448 static int 2449 ahci_setup_fis(struct ahci_channel *ch, struct ahci_cmd_tab *ctp, union ccb *ccb, int tag) 2450 { 2451 u_int8_t *fis = &ctp->cfis[0]; 2452 2453 bzero(fis, 20); 2454 fis[0] = 0x27; /* host to device */ 2455 fis[1] = (ccb->ccb_h.target_id & 0x0f); 2456 if (ccb->ccb_h.func_code == XPT_SCSI_IO) { 2457 fis[1] |= 0x80; 2458 fis[2] = ATA_PACKET_CMD; 2459 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE && 2460 ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA) 2461 fis[3] = ATA_F_DMA; 2462 else { 2463 fis[5] = ccb->csio.dxfer_len; 2464 fis[6] = ccb->csio.dxfer_len >> 8; 2465 } 2466 fis[7] = ATA_D_LBA; 2467 fis[15] = ATA_A_4BIT; 2468 bcopy((ccb->ccb_h.flags & CAM_CDB_POINTER) ? 2469 ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes, 2470 ctp->acmd, ccb->csio.cdb_len); 2471 bzero(ctp->acmd + ccb->csio.cdb_len, 32 - ccb->csio.cdb_len); 2472 } else if ((ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) == 0) { 2473 fis[1] |= 0x80; 2474 fis[2] = ccb->ataio.cmd.command; 2475 fis[3] = ccb->ataio.cmd.features; 2476 fis[4] = ccb->ataio.cmd.lba_low; 2477 fis[5] = ccb->ataio.cmd.lba_mid; 2478 fis[6] = ccb->ataio.cmd.lba_high; 2479 fis[7] = ccb->ataio.cmd.device; 2480 fis[8] = ccb->ataio.cmd.lba_low_exp; 2481 fis[9] = ccb->ataio.cmd.lba_mid_exp; 2482 fis[10] = ccb->ataio.cmd.lba_high_exp; 2483 fis[11] = ccb->ataio.cmd.features_exp; 2484 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) { 2485 fis[12] = tag << 3; 2486 } else { 2487 fis[12] = ccb->ataio.cmd.sector_count; 2488 } 2489 fis[13] = ccb->ataio.cmd.sector_count_exp; 2490 fis[15] = ATA_A_4BIT; 2491 } else { 2492 fis[15] = ccb->ataio.cmd.control; 2493 } 2494 if (ccb->ataio.ata_flags & ATA_FLAG_AUX) { 2495 fis[16] = ccb->ataio.aux & 0xff; 2496 fis[17] = (ccb->ataio.aux >> 8) & 0xff; 2497 fis[18] = (ccb->ataio.aux >> 16) & 0xff; 2498 fis[19] = (ccb->ataio.aux >> 24) & 0xff; 2499 } 2500 return (20); 2501 } 2502 2503 static int 2504 ahci_sata_connect(struct ahci_channel *ch) 2505 { 2506 u_int32_t status; 2507 int timeout, found = 0; 2508 2509 /* Wait up to 100ms for "connect well" */ 2510 for (timeout = 0; timeout < 1000 ; timeout++) { 2511 status = ATA_INL(ch->r_mem, AHCI_P_SSTS); 2512 if ((status & ATA_SS_DET_MASK) != ATA_SS_DET_NO_DEVICE) 2513 found = 1; 2514 if (((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_ONLINE) && 2515 ((status & ATA_SS_SPD_MASK) != ATA_SS_SPD_NO_SPEED) && 2516 ((status & ATA_SS_IPM_MASK) == ATA_SS_IPM_ACTIVE)) 2517 break; 2518 if ((status & ATA_SS_DET_MASK) == ATA_SS_DET_PHY_OFFLINE) { 2519 if (bootverbose) { 2520 device_printf(ch->dev, "SATA offline status=%08x\n", 2521 status); 2522 } 2523 return (0); 2524 } 2525 if (found == 0 && timeout >= 100) 2526 break; 2527 DELAY(100); 2528 } 2529 if (timeout >= 1000 || !found) { 2530 if (bootverbose) { 2531 device_printf(ch->dev, 2532 "SATA connect timeout time=%dus status=%08x\n", 2533 timeout * 100, status); 2534 } 2535 return (0); 2536 } 2537 if (bootverbose) { 2538 device_printf(ch->dev, "SATA connect time=%dus status=%08x\n", 2539 timeout * 100, status); 2540 } 2541 /* Clear SATA error register */ 2542 ATA_OUTL(ch->r_mem, AHCI_P_SERR, 0xffffffff); 2543 return (1); 2544 } 2545 2546 static int 2547 ahci_sata_phy_reset(struct ahci_channel *ch) 2548 { 2549 int sata_rev; 2550 uint32_t val, detval; 2551 2552 if (ch->listening) { 2553 val = ATA_INL(ch->r_mem, AHCI_P_CMD); 2554 val |= AHCI_P_CMD_SUD; 2555 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); 2556 ch->listening = 0; 2557 } 2558 sata_rev = ch->user[ch->pm_present ? 15 : 0].revision; 2559 if (sata_rev == 1) 2560 val = ATA_SC_SPD_SPEED_GEN1; 2561 else if (sata_rev == 2) 2562 val = ATA_SC_SPD_SPEED_GEN2; 2563 else if (sata_rev == 3) 2564 val = ATA_SC_SPD_SPEED_GEN3; 2565 else 2566 val = 0; 2567 detval = ahci_ch_detval(ch, ATA_SC_DET_RESET); 2568 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 2569 detval | val | 2570 ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER); 2571 DELAY(1000); 2572 detval = ahci_ch_detval(ch, ATA_SC_DET_IDLE); 2573 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, 2574 detval | val | ((ch->pm_level > 0) ? 0 : 2575 (ATA_SC_IPM_DIS_PARTIAL | ATA_SC_IPM_DIS_SLUMBER))); 2576 if (!ahci_sata_connect(ch)) { 2577 if (ch->caps & AHCI_CAP_SSS) { 2578 val = ATA_INL(ch->r_mem, AHCI_P_CMD); 2579 val &= ~AHCI_P_CMD_SUD; 2580 ATA_OUTL(ch->r_mem, AHCI_P_CMD, val); 2581 ch->listening = 1; 2582 } else if (ch->pm_level > 0) 2583 ATA_OUTL(ch->r_mem, AHCI_P_SCTL, ATA_SC_DET_DISABLE); 2584 return (0); 2585 } 2586 return (1); 2587 } 2588 2589 static int 2590 ahci_check_ids(struct ahci_channel *ch, union ccb *ccb) 2591 { 2592 2593 if (ccb->ccb_h.target_id > ((ch->caps & AHCI_CAP_SPM) ? 15 : 0)) { 2594 ccb->ccb_h.status = CAM_TID_INVALID; 2595 ahci_done(ch, ccb); 2596 return (-1); 2597 } 2598 if (ccb->ccb_h.target_lun != 0) { 2599 ccb->ccb_h.status = CAM_LUN_INVALID; 2600 ahci_done(ch, ccb); 2601 return (-1); 2602 } 2603 return (0); 2604 } 2605 2606 static void 2607 ahciaction(struct cam_sim *sim, union ccb *ccb) 2608 { 2609 struct ahci_channel *ch; 2610 2611 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ahciaction func_code=%x\n", 2612 ccb->ccb_h.func_code)); 2613 2614 ch = (struct ahci_channel *)cam_sim_softc(sim); 2615 switch (ccb->ccb_h.func_code) { 2616 /* Common cases first */ 2617 case XPT_ATA_IO: /* Execute the requested I/O operation */ 2618 case XPT_SCSI_IO: 2619 if (ahci_check_ids(ch, ccb)) 2620 return; 2621 if (ch->devices == 0 || 2622 (ch->pm_present == 0 && 2623 ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) { 2624 ccb->ccb_h.status = CAM_SEL_TIMEOUT; 2625 break; 2626 } 2627 ccb->ccb_h.recovery_type = RECOVERY_NONE; 2628 /* Check for command collision. */ 2629 if (ahci_check_collision(ch, ccb)) { 2630 /* Freeze command. */ 2631 ch->frozen = ccb; 2632 /* We have only one frozen slot, so freeze simq also. */ 2633 xpt_freeze_simq(ch->sim, 1); 2634 return; 2635 } 2636 ahci_begin_transaction(ch, ccb); 2637 return; 2638 case XPT_ABORT: /* Abort the specified CCB */ 2639 /* XXX Implement */ 2640 ccb->ccb_h.status = CAM_REQ_INVALID; 2641 break; 2642 case XPT_SET_TRAN_SETTINGS: 2643 { 2644 struct ccb_trans_settings *cts = &ccb->cts; 2645 struct ahci_device *d; 2646 2647 if (ahci_check_ids(ch, ccb)) 2648 return; 2649 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 2650 d = &ch->curr[ccb->ccb_h.target_id]; 2651 else 2652 d = &ch->user[ccb->ccb_h.target_id]; 2653 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION) 2654 d->revision = cts->xport_specific.sata.revision; 2655 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) 2656 d->mode = cts->xport_specific.sata.mode; 2657 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) 2658 d->bytecount = min(8192, cts->xport_specific.sata.bytecount); 2659 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS) 2660 d->tags = min(ch->numslots, cts->xport_specific.sata.tags); 2661 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM) 2662 ch->pm_present = cts->xport_specific.sata.pm_present; 2663 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI) 2664 d->atapi = cts->xport_specific.sata.atapi; 2665 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS) 2666 d->caps = cts->xport_specific.sata.caps; 2667 ccb->ccb_h.status = CAM_REQ_CMP; 2668 break; 2669 } 2670 case XPT_GET_TRAN_SETTINGS: 2671 /* Get default/user set transfer settings for the target */ 2672 { 2673 struct ccb_trans_settings *cts = &ccb->cts; 2674 struct ahci_device *d; 2675 uint32_t status; 2676 2677 if (ahci_check_ids(ch, ccb)) 2678 return; 2679 if (cts->type == CTS_TYPE_CURRENT_SETTINGS) 2680 d = &ch->curr[ccb->ccb_h.target_id]; 2681 else 2682 d = &ch->user[ccb->ccb_h.target_id]; 2683 cts->protocol = PROTO_UNSPECIFIED; 2684 cts->protocol_version = PROTO_VERSION_UNSPECIFIED; 2685 cts->transport = XPORT_SATA; 2686 cts->transport_version = XPORT_VERSION_UNSPECIFIED; 2687 cts->proto_specific.valid = 0; 2688 cts->xport_specific.sata.valid = 0; 2689 if (cts->type == CTS_TYPE_CURRENT_SETTINGS && 2690 (ccb->ccb_h.target_id == 15 || 2691 (ccb->ccb_h.target_id == 0 && !ch->pm_present))) { 2692 status = ATA_INL(ch->r_mem, AHCI_P_SSTS) & ATA_SS_SPD_MASK; 2693 if (status & 0x0f0) { 2694 cts->xport_specific.sata.revision = 2695 (status & 0x0f0) >> 4; 2696 cts->xport_specific.sata.valid |= 2697 CTS_SATA_VALID_REVISION; 2698 } 2699 cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D; 2700 if (ch->pm_level) { 2701 if (ch->caps & (AHCI_CAP_PSC | AHCI_CAP_SSC)) 2702 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ; 2703 if (ch->caps2 & AHCI_CAP2_APST) 2704 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_APST; 2705 } 2706 if ((ch->caps & AHCI_CAP_SNCQ) && 2707 (ch->quirks & AHCI_Q_NOAA) == 0) 2708 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_DMAAA; 2709 cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_AN; 2710 cts->xport_specific.sata.caps &= 2711 ch->user[ccb->ccb_h.target_id].caps; 2712 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 2713 } else { 2714 cts->xport_specific.sata.revision = d->revision; 2715 cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION; 2716 cts->xport_specific.sata.caps = d->caps; 2717 cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS; 2718 } 2719 cts->xport_specific.sata.mode = d->mode; 2720 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE; 2721 cts->xport_specific.sata.bytecount = d->bytecount; 2722 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT; 2723 cts->xport_specific.sata.pm_present = ch->pm_present; 2724 cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM; 2725 cts->xport_specific.sata.tags = d->tags; 2726 cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS; 2727 cts->xport_specific.sata.atapi = d->atapi; 2728 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI; 2729 ccb->ccb_h.status = CAM_REQ_CMP; 2730 break; 2731 } 2732 case XPT_RESET_BUS: /* Reset the specified SCSI bus */ 2733 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */ 2734 ahci_reset(ch); 2735 ccb->ccb_h.status = CAM_REQ_CMP; 2736 break; 2737 case XPT_TERM_IO: /* Terminate the I/O process */ 2738 /* XXX Implement */ 2739 ccb->ccb_h.status = CAM_REQ_INVALID; 2740 break; 2741 case XPT_PATH_INQ: /* Path routing inquiry */ 2742 { 2743 struct ccb_pathinq *cpi = &ccb->cpi; 2744 2745 cpi->version_num = 1; /* XXX??? */ 2746 cpi->hba_inquiry = PI_SDTR_ABLE; 2747 if (ch->caps & AHCI_CAP_SNCQ) 2748 cpi->hba_inquiry |= PI_TAG_ABLE; 2749 if (ch->caps & AHCI_CAP_SPM) 2750 cpi->hba_inquiry |= PI_SATAPM; 2751 cpi->target_sprt = 0; 2752 cpi->hba_misc = PIM_SEQSCAN | PIM_UNMAPPED; 2753 if ((ch->quirks & AHCI_Q_NOAUX) == 0) 2754 cpi->hba_misc |= PIM_ATA_EXT; 2755 cpi->hba_eng_cnt = 0; 2756 if (ch->caps & AHCI_CAP_SPM) 2757 cpi->max_target = 15; 2758 else 2759 cpi->max_target = 0; 2760 cpi->max_lun = 0; 2761 cpi->initiator_id = 0; 2762 cpi->bus_id = cam_sim_bus(sim); 2763 cpi->base_transfer_speed = 150000; 2764 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN); 2765 strlcpy(cpi->hba_vid, "AHCI", HBA_IDLEN); 2766 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN); 2767 cpi->unit_number = cam_sim_unit(sim); 2768 cpi->transport = XPORT_SATA; 2769 cpi->transport_version = XPORT_VERSION_UNSPECIFIED; 2770 cpi->protocol = PROTO_ATA; 2771 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED; 2772 cpi->maxio = MAXPHYS; 2773 /* ATI SB600 can't handle 256 sectors with FPDMA (NCQ). */ 2774 if (ch->quirks & AHCI_Q_MAXIO_64K) 2775 cpi->maxio = min(cpi->maxio, 128 * 512); 2776 cpi->hba_vendor = ch->vendorid; 2777 cpi->hba_device = ch->deviceid; 2778 cpi->hba_subvendor = ch->subvendorid; 2779 cpi->hba_subdevice = ch->subdeviceid; 2780 cpi->ccb_h.status = CAM_REQ_CMP; 2781 break; 2782 } 2783 default: 2784 ccb->ccb_h.status = CAM_REQ_INVALID; 2785 break; 2786 } 2787 ahci_done(ch, ccb); 2788 } 2789 2790 static void 2791 ahcipoll(struct cam_sim *sim) 2792 { 2793 struct ahci_channel *ch = (struct ahci_channel *)cam_sim_softc(sim); 2794 uint32_t istatus; 2795 2796 /* Read interrupt statuses and process if any. */ 2797 istatus = ATA_INL(ch->r_mem, AHCI_P_IS); 2798 if (istatus != 0) 2799 ahci_ch_intr_main(ch, istatus); 2800 if (ch->resetting != 0 && 2801 (--ch->resetpolldiv <= 0 || !callout_pending(&ch->reset_timer))) { 2802 ch->resetpolldiv = 1000; 2803 ahci_reset_to(ch); 2804 } 2805 } 2806 2807 devclass_t ahci_devclass; 2808 2809 MODULE_VERSION(ahci, 1); 2810 MODULE_DEPEND(ahci, cam, 1, 1, 1); 2811