/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ata.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ata.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/ata/ata-all.h>
#include <dev/ata/ata-pci.h>
#include <ata_if.h>

/* local prototypes */
static int ata_marvell_chipinit(device_t dev);
static int ata_marvell_ch_attach(device_t dev);
static int ata_marvell_setmode(device_t dev, int target, int mode);
static int ata_marvell_dummy_chipinit(device_t dev);
static int ata_marvell_edma_ch_attach(device_t dev);
static int ata_marvell_edma_ch_detach(device_t dev);
static int ata_marvell_edma_status(device_t dev);
static int ata_marvell_edma_begin_transaction(struct ata_request *request);
static int ata_marvell_edma_end_transaction(struct ata_request *request);
static void ata_marvell_edma_reset(device_t dev);
static void ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs,
    int nsegs, int error);
static void ata_marvell_edma_dmainit(device_t dev);

/* misc defines */
#define MV_50XX     50
#define MV_60XX     60
#define MV_6042     62
#define MV_7042     72
#define MV_61XX     61
#define MV_91XX     91

/*
 * Marvell chipset support functions
 */
#define ATA_MV_HOST_BASE(ch) \
    ((ch->unit & 3) * 0x0100) + (ch->unit > 3 ? 0x30000 : 0x20000)
#define ATA_MV_EDMA_BASE(ch) \
    ((ch->unit & 3) * 0x2000) + (ch->unit > 3 ? 0x30000 : 0x20000)
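
/*
 * Register window layout, as encoded by the macros above: SATA ports 0-3
 * live in the host controller window at offset 0x20000 and ports 4-7 in
 * the window at 0x30000.  Within a window, each port owns a 0x100-byte
 * block of per-port host registers and a 0x2000-byte block of EDMA
 * registers, both indexed by (ch->unit & 3).
 */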

struct ata_marvell_response {
    u_int16_t   tag;
    u_int8_t    edma_status;
    u_int8_t    dev_status;
    u_int32_t   timestamp;
};

struct ata_marvell_dma_prdentry {
    u_int32_t addrlo;
    u_int32_t count;
    u_int32_t addrhi;
    u_int32_t reserved;
};

static int
ata_marvell_probe(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    static const struct ata_chip_id ids[] =
    {{ ATA_M88SX5040, 0, 4, MV_50XX, ATA_SA150, "88SX5040" },
     { ATA_M88SX5041, 0, 4, MV_50XX, ATA_SA150, "88SX5041" },
     { ATA_M88SX5080, 0, 8, MV_50XX, ATA_SA150, "88SX5080" },
     { ATA_M88SX5081, 0, 8, MV_50XX, ATA_SA150, "88SX5081" },
     { ATA_M88SX6041, 0, 4, MV_60XX, ATA_SA300, "88SX6041" },
     { ATA_M88SX6042, 0, 4, MV_6042, ATA_SA300, "88SX6042" },
     { ATA_M88SX6081, 0, 8, MV_60XX, ATA_SA300, "88SX6081" },
     { ATA_M88SX7042, 0, 4, MV_7042, ATA_SA300, "88SX7042" },
     { ATA_M88SE6101, 0, 0, MV_61XX, ATA_UDMA6, "88SE6101" },
     { ATA_M88SE6102, 0, 0, MV_61XX, ATA_UDMA6, "88SE6102" },
     { ATA_M88SE6111, 0, 1, MV_61XX, ATA_UDMA6, "88SE6111" },
     { ATA_M88SE6121, 0, 2, MV_61XX, ATA_UDMA6, "88SE6121" },
     { ATA_M88SE6141, 0, 4, MV_61XX, ATA_UDMA6, "88SE6141" },
     { ATA_M88SE6145, 0, 4, MV_61XX, ATA_UDMA6, "88SE6145" },
     { 0x91a41b4b,    0, 0, MV_91XX, ATA_UDMA6, "88SE912x" },
     { 0, 0, 0, 0, 0, 0}};
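    /*
     * Table fields, as this driver uses them: the third value (cfg1) is
     * the EDMA channel count for the SATA chips, and is nonzero for the
     * 61XX parts that carry a companion AHCI function (an AHCI subdevice
     * is attached for those in ata_marvell_chipinit()); the fourth value
     * (cfg2) selects the chip family below; the fifth is the fastest
     * supported transfer mode.
     */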

    if (pci_get_vendor(dev) != ATA_MARVELL_ID &&
        pci_get_vendor(dev) != ATA_MARVELL2_ID)
        return ENXIO;

    if (!(ctlr->chip = ata_match_chip(dev, ids)))
        return ENXIO;

    ata_set_desc(dev);

    switch (ctlr->chip->cfg2) {
    case MV_50XX:
    case MV_60XX:
    case MV_6042:
    case MV_7042:
        ctlr->chipinit = ata_marvell_edma_chipinit;
        break;
    case MV_61XX:
        ctlr->chipinit = ata_marvell_chipinit;
        break;
    case MV_91XX:
        ctlr->chipinit = ata_marvell_dummy_chipinit;
        break;
    }
    return (BUS_PROBE_DEFAULT);
}

static int
ata_marvell_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    device_t child;

    if (ata_setup_interrupt(dev, ata_generic_intr))
        return ENXIO;
    /* Create AHCI subdevice if AHCI part present. */
    if (ctlr->chip->cfg1) {
        child = device_add_child(dev, NULL, -1);
        if (child != NULL) {
            device_set_ivars(child, (void *)(intptr_t)-1);
            bus_generic_attach(dev);
        }
    }
    ctlr->ch_attach = ata_marvell_ch_attach;
    ctlr->ch_detach = ata_pci_ch_detach;
    ctlr->reset = ata_generic_reset;
    ctlr->setmode = ata_marvell_setmode;
    ctlr->channels = 1;
    return (0);
}

static int
ata_marvell_ch_attach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);
    int error;

    error = ata_pci_ch_attach(dev);
    /* don't use 32 bit PIO transfers */
    ch->flags |= ATA_USE_16BIT;
    ch->flags |= ATA_CHECKS_CABLE;
    return (error);
}

static int
ata_marvell_setmode(device_t dev, int target, int mode)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    mode = min(mode, ctlr->chip->max_dma);
    /* Check for 80pin cable present. */
    if (ata_dma_check_80pin && mode > ATA_UDMA2 &&
        ATA_IDX_INB(ch, ATA_BMDEVSPEC_0) & 0x01) {
        ata_print_cable(dev, "controller");
        mode = ATA_UDMA2;
    }
    /* Nothing to do to set up the mode; the controller snoops the
       SET_FEATURES command. */
    return (mode);
}

static int
ata_marvell_dummy_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    ctlr->channels = 0;
    return (0);
}

int
ata_marvell_edma_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev, ata_generic_intr))
        return ENXIO;

    ctlr->r_type1 = SYS_RES_MEMORY;
    ctlr->r_rid1 = PCIR_BAR(0);
    if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
        &ctlr->r_rid1, RF_ACTIVE)))
        return ENXIO;

    /* mask all host controller interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d64, 0x00000000);

    /* mask all PCI interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x00000000);

    ctlr->ch_attach = ata_marvell_edma_ch_attach;
    ctlr->ch_detach = ata_marvell_edma_ch_detach;
    ctlr->reset = ata_marvell_edma_reset;
    ctlr->setmode = ata_sata_setmode;
    ctlr->getrev = ata_sata_getrev;
    ctlr->channels = ctlr->chip->cfg1;

    /* clear host controller interrupts */
    ATA_OUTL(ctlr->r_res1, 0x20014, 0x00000000);
    if (ctlr->chip->cfg1 > 4)
        ATA_OUTL(ctlr->r_res1, 0x30014, 0x00000000);

    /* clear PCI interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d58, 0x00000000);

    /* unmask PCI interrupts we want */
    ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x007fffff);

    /* unmask host controller interrupts we want */
    ATA_OUTL(ctlr->r_res1, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ |
        /*(1<<19) | (1<<20) | (1<<21) |*/(1<<22) | (1<<24) | (0x7f << 25));

    return 0;
}
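
/*
 * Per-channel DMA work area layout, as used below: the first 1024 bytes
 * hold the EDMA request ring (32 slots of 32 bytes each) and the next
 * 256 bytes hold the response ring (32 slots of 8 bytes each, matching
 * struct ata_marvell_response).
 */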
static int
ata_marvell_edma_ch_attach(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    u_int64_t work;
    int i;

    ata_marvell_edma_dmainit(dev);
    work = ch->dma.work_bus;
    /* clear work area */
    bzero(ch->dma.work, 1024+256);
    bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* set legacy ATA resources */
    for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
        ch->r_io[i].res = ctlr->r_res1;
        ch->r_io[i].offset = 0x02100 + (i << 2) + ATA_MV_EDMA_BASE(ch);
    }
    ch->r_io[ATA_CONTROL].res = ctlr->r_res1;
    ch->r_io[ATA_CONTROL].offset = 0x02120 + ATA_MV_EDMA_BASE(ch);
    ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res1;
    ata_default_registers(dev);

    /* set SATA resources */
    switch (ctlr->chip->cfg2) {
    case MV_50XX:
        ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
        ch->r_io[ATA_SSTATUS].offset = 0x00100 + ATA_MV_HOST_BASE(ch);
        ch->r_io[ATA_SERROR].res = ctlr->r_res1;
        ch->r_io[ATA_SERROR].offset = 0x00104 + ATA_MV_HOST_BASE(ch);
        ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
        ch->r_io[ATA_SCONTROL].offset = 0x00108 + ATA_MV_HOST_BASE(ch);
        break;
    case MV_60XX:
    case MV_6042:
    case MV_7042:
        ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
        ch->r_io[ATA_SSTATUS].offset = 0x02300 + ATA_MV_EDMA_BASE(ch);
        ch->r_io[ATA_SERROR].res = ctlr->r_res1;
        ch->r_io[ATA_SERROR].offset = 0x02304 + ATA_MV_EDMA_BASE(ch);
        ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
        ch->r_io[ATA_SCONTROL].offset = 0x02308 + ATA_MV_EDMA_BASE(ch);
        ch->r_io[ATA_SACTIVE].res = ctlr->r_res1;
        ch->r_io[ATA_SACTIVE].offset = 0x02350 + ATA_MV_EDMA_BASE(ch);
        break;
    }

    ch->flags |= ATA_NO_SLAVE;
    ch->flags |= ATA_USE_16BIT; /* XXX SOS needed ? */
    ch->flags |= ATA_SATA;
    ata_generic_hw(dev);
    ch->hw.begin_transaction = ata_marvell_edma_begin_transaction;
    ch->hw.end_transaction = ata_marvell_edma_end_transaction;
    ch->hw.status = ata_marvell_edma_status;

    /* disable the EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
    DELAY(100000);      /* SOS should poll for disabled */

    /* set configuration to non-queued 128b read transfers stop on error */
    ATA_OUTL(ctlr->r_res1, 0x02000 + ATA_MV_EDMA_BASE(ch), (1<<11) | (1<<13));

    /* request queue base high */
    ATA_OUTL(ctlr->r_res1, 0x02010 + ATA_MV_EDMA_BASE(ch), work >> 32);

    /* request queue in ptr */
    ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

    /* request queue out ptr */
    ATA_OUTL(ctlr->r_res1, 0x02018 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* response queue base high */
    work += 1024;
    ATA_OUTL(ctlr->r_res1, 0x0201c + ATA_MV_EDMA_BASE(ch), work >> 32);

    /* response queue in ptr */
    ATA_OUTL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* response queue out ptr */
    ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

    /* clear SATA error register */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* clear any outstanding error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* unmask all error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

    /* enable EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
    return 0;
}

static int
ata_marvell_edma_ch_detach(device_t dev)
{
    struct ata_channel *ch = device_get_softc(dev);

    if (ch->dma.work_tag && ch->dma.work_map)
        bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
    ata_dmafini(dev);
    return (0);
}
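
/*
 * The main interrupt cause register at 0x01d60 carries two bits per port,
 * with ports on the second host controller shifted up by one extra bit.
 * As this driver decodes it below, the low bit of a pair signals an
 * error/EDMA event and the high bit signals device activity.
 */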
static int
ata_marvell_edma_status(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    u_int32_t cause = ATA_INL(ctlr->r_res1, 0x01d60);
    int shift = (ch->unit << 1) + (ch->unit > 3);

    if (cause & (1 << shift)) {

        /* clear interrupt(s) */
        ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

        /* do we have any PHY events ? */
        ata_sata_phy_check_events(dev, -1);
    }

    /* do we have any device action ? */
    return (cause & (2 << shift));
}

/* must be called with ATA channel locked and state_mtx held */
static int
ata_marvell_edma_begin_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr =
        device_get_softc(device_get_parent(request->parent));
    struct ata_channel *ch = device_get_softc(request->parent);
    u_int32_t req_in;
    u_int8_t *bytep;
    int i;
    int error, slot;

    /* only DMA R/W goes through the EDMA machine */
    if (request->u.ata.command != ATA_READ_DMA &&
        request->u.ata.command != ATA_WRITE_DMA &&
        request->u.ata.command != ATA_READ_DMA48 &&
        request->u.ata.command != ATA_WRITE_DMA48) {

        /* disable the EDMA machinery */
        if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)
            ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
        return ata_begin_transaction(request);
    }

    /* check sanity, setup SG list and DMA engine */
    if ((error = ch->dma.load(request, NULL, NULL))) {
        device_printf(request->parent, "setting up DMA failed\n");
        request->result = error;
        return ATA_OP_FINISHED;
    }

    /* get next free request queue slot */
    req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch));
    slot = (((req_in & ~0xfffffc00) >> 5) + 0) & 0x1f;
    bytep = (u_int8_t *)(ch->dma.work);
    bytep += (slot << 5);
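    /*
     * Two CRQB layouts are built below: on the 50XX/60XX parts the
     * request body is a list of (value, register) byte pairs with bit 7
     * of the register byte marking the final (command) entry, while the
     * 6042/7042 use a fixed-format CRQB that carries the ATA shadow
     * register image directly.
     */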
    /* fill in this request */
    le32enc(bytep + 0 * sizeof(u_int32_t),
        request->dma->sg_bus & 0xffffffff);
    le32enc(bytep + 1 * sizeof(u_int32_t),
        (u_int64_t)request->dma->sg_bus >> 32);
    if (ctlr->chip->cfg2 != MV_6042 && ctlr->chip->cfg2 != MV_7042) {
        le16enc(bytep + 4 * sizeof(u_int16_t),
            (request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag << 1));

        i = 10;
        bytep[i++] = (request->u.ata.count >> 8) & 0xff;
        bytep[i++] = 0x10 | ATA_COUNT;
        bytep[i++] = request->u.ata.count & 0xff;
        bytep[i++] = 0x10 | ATA_COUNT;

        bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
        bytep[i++] = 0x10 | ATA_SECTOR;
        bytep[i++] = request->u.ata.lba & 0xff;
        bytep[i++] = 0x10 | ATA_SECTOR;

        bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
        bytep[i++] = 0x10 | ATA_CYL_LSB;
        bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
        bytep[i++] = 0x10 | ATA_CYL_LSB;

        bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
        bytep[i++] = 0x10 | ATA_CYL_MSB;
        bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
        bytep[i++] = 0x10 | ATA_CYL_MSB;

        bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0xf);
        bytep[i++] = 0x10 | ATA_DRIVE;

        bytep[i++] = request->u.ata.command;
        bytep[i++] = 0x90 | ATA_COMMAND;
    } else {
        le32enc(bytep + 2 * sizeof(u_int32_t),
            (request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag << 1));

        i = 16;
        bytep[i++] = 0;
        bytep[i++] = 0;
        bytep[i++] = request->u.ata.command;
        bytep[i++] = request->u.ata.feature & 0xff;

        bytep[i++] = request->u.ata.lba & 0xff;
        bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
        bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
        bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0x0f);

        bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
        bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
        bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
        bytep[i++] = (request->u.ata.feature >> 8) & 0xff;

        bytep[i++] = request->u.ata.count & 0xff;
        bytep[i++] = (request->u.ata.count >> 8) & 0xff;
        bytep[i++] = 0;
        bytep[i++] = 0;
    }

    bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* enable EDMA machinery if needed */
    if (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) {
        ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
        while (!(ATA_INL(ctlr->r_res1,
                0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
            DELAY(10);
    }
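    /*
     * The request queue in pointer keeps the ring base address in its
     * upper bits and the slot index in bits 5-9 (32-byte slots), so the
     * index is advanced modulo 32 and merged back with the base before
     * being written out.
     */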
    /* tell EDMA it has a new request */
    slot = (((req_in & ~0xfffffc00) >> 5) + 1) & 0x1f;
    req_in &= 0xfffffc00;
    req_in += (slot << 5);
    ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in);

    return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
static int
ata_marvell_edma_end_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr =
        device_get_softc(device_get_parent(request->parent));
    struct ata_channel *ch = device_get_softc(request->parent);
    int offset = (ch->unit > 3 ? 0x30014 : 0x20014);
    u_int32_t icr = ATA_INL(ctlr->r_res1, offset);
    int res;

    /* EDMA interrupt */
    if ((icr & (0x0001 << (ch->unit & 3)))) {
        struct ata_marvell_response *response;
        u_int32_t rsp_in, rsp_out;
        int slot;

        /* stop timeout */
        callout_stop(&request->callout);
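        /*
         * Response (CRPB) slots are 8 bytes, so the queue pointers keep
         * the slot index in bits 3-7; the completion for this request is
         * read from the response ring at work + 1024 and the out pointer
         * is advanced to acknowledge it.
         */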
        /* get response pointers */
        rsp_in = ATA_INL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch));
        rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch));
        slot = (((rsp_in & ~0xffffff00) >> 3)) & 0x1f;
        rsp_out &= 0xffffff00;
        rsp_out += (slot << 3);
        bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
            BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
        response = (struct ata_marvell_response *)
            (ch->dma.work + 1024 + (slot << 3));

        /* record status for this request */
        request->status = response->dev_status;
        request->error = 0;

        /* ack response */
        ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out);

        /* update progress */
        if (!(request->status & ATA_S_ERROR) &&
            !(request->flags & ATA_R_TIMEOUT))
            request->donecount = request->bytecount;

        /* unload SG list */
        ch->dma.unload(request);

        res = ATA_OP_FINISHED;
    }

    /* legacy ATA interrupt */
    else {
        res = ata_end_transaction(request);
    }

    /* ack interrupt */
    ATA_OUTL(ctlr->r_res1, offset, ~(icr & (0x0101 << (ch->unit & 3))));
    return res;
}

static void
ata_marvell_edma_reset(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    /* disable the EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
    while ((ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
        DELAY(10);

    /* clear SATA error register */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* clear any outstanding error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* unmask all error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

    /* enable channel and test for devices */
    if (ata_sata_phy_reset(dev, -1, 1))
        ata_generic_reset(dev);
    else
        ch->devices = 0;

    /* enable EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
}

static void
ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs,
    int error)
{
    struct ata_dmasetprd_args *args = xsc;
    struct ata_marvell_dma_prdentry *prd = args->dmatab;
    int i;

    if ((args->error = error))
        return;

    for (i = 0; i < nsegs; i++) {
        prd[i].addrlo = htole32(segs[i].ds_addr);
        prd[i].count = htole32(segs[i].ds_len);
        prd[i].addrhi = htole32((u_int64_t)segs[i].ds_addr >> 32);
        prd[i].reserved = 0;
    }
    prd[i - 1].count |= htole32(ATA_DMA_EOT);
    KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
    args->nsegs = nsegs;
}

static void
ata_marvell_edma_dmainit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    /* note start and stop are not used here */
    ch->dma.setprd = ata_marvell_edma_dmasetprd;

    /* if 64bit support present adjust max address used */
    if (ATA_INL(ctlr->r_res1, 0x00d00) & 0x00000004)
        ch->dma.max_address = BUS_SPACE_MAXADDR;

    /* chip does not reliably do 64K DMA transfers */
    if (ctlr->chip->cfg2 == MV_50XX || ctlr->chip->cfg2 == MV_60XX)
        ch->dma.max_iosize = 64 * DEV_BSIZE;
    ata_dmainit(dev);
}

ATA_DECLARE_DRIVER(ata_marvell);