/*-
 * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification, immediately at the beginning of the file.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ata.h"
#include <sys/param.h>
#include <sys/module.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/ata.h>
#include <sys/bus.h>
#include <sys/endian.h>
#include <sys/malloc.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/ata/ata-all.h>
#include <dev/ata/ata-pci.h>
#include <ata_if.h>

/* local prototypes */
static int ata_marvell_pata_chipinit(device_t dev);
static int ata_marvell_pata_ch_attach(device_t dev);
static void ata_marvell_pata_setmode(device_t dev, int mode);
static int ata_marvell_edma_ch_attach(device_t dev);
static int ata_marvell_edma_ch_detach(device_t dev);
static int ata_marvell_edma_status(device_t dev);
static int ata_marvell_edma_begin_transaction(struct ata_request *request);
static int ata_marvell_edma_end_transaction(struct ata_request *request);
static void ata_marvell_edma_reset(device_t dev);
static void ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
static void ata_marvell_edma_dmainit(device_t dev);

/* misc defines */
#define MV_50XX         50
#define MV_60XX         60
#define MV_61XX         61

/*
 * Marvell chipset support functions
 */
#define ATA_MV_HOST_BASE(ch) \
        ((ch->unit & 3) * 0x0100) + (ch->unit > 3 ? 0x30000 : 0x20000)
#define ATA_MV_EDMA_BASE(ch) \
        ((ch->unit & 3) * 0x2000) + (ch->unit > 3 ? 0x30000 : 0x20000)
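
/*
 * Register window layout implied by the macros above: ports 0-3 live in
 * the window at offset 0x20000 and ports 4-7 in the one at 0x30000; within
 * a window the per-port host registers are spaced 0x100 apart and the
 * per-port EDMA register blocks 0x2000 apart.  (Inferred from the macros,
 * not from a datasheet.)
 */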

struct ata_marvell_response {
        u_int16_t tag;
        u_int8_t edma_status;
        u_int8_t dev_status;
        u_int32_t timestamp;
};

struct ata_marvell_dma_prdentry {
        u_int32_t addrlo;
        u_int32_t count;
        u_int32_t addrhi;
        u_int32_t reserved;
};

static int
ata_marvell_probe(device_t dev)
{
        struct ata_pci_controller *ctlr = device_get_softc(dev);
        static struct ata_chip_id ids[] =
        {{ ATA_M88SX5040, 0, 4, MV_50XX, ATA_SA150, "88SX5040" },
         { ATA_M88SX5041, 0, 4, MV_50XX, ATA_SA150, "88SX5041" },
         { ATA_M88SX5080, 0, 8, MV_50XX, ATA_SA150, "88SX5080" },
         { ATA_M88SX5081, 0, 8, MV_50XX, ATA_SA150, "88SX5081" },
         { ATA_M88SX6041, 0, 4, MV_60XX, ATA_SA300, "88SX6041" },
         { ATA_M88SX6081, 0, 8, MV_60XX, ATA_SA300, "88SX6081" },
         { ATA_M88SX6101, 0, 1, MV_61XX, ATA_UDMA6, "88SX6101" },
         { ATA_M88SX6121, 0, 1, MV_61XX, ATA_UDMA6, "88SX6121" },
         { ATA_M88SX6145, 0, 2, MV_61XX, ATA_UDMA6, "88SX6145" },
         { 0, 0, 0, 0, 0, 0}};

        if (pci_get_vendor(dev) != ATA_MARVELL_ID)
                return ENXIO;

        if (!(ctlr->chip = ata_match_chip(dev, ids)))
                return ENXIO;

        ata_set_desc(dev);

        switch (ctlr->chip->cfg2) {
        case MV_50XX:
        case MV_60XX:
                ctlr->chipinit = ata_marvell_edma_chipinit;
                break;
        case MV_61XX:
                ctlr->chipinit = ata_marvell_pata_chipinit;
                break;
        }
        return (BUS_PROBE_DEFAULT);
}

static int
ata_marvell_pata_chipinit(device_t dev)
{
        struct ata_pci_controller *ctlr = device_get_softc(dev);

        if (ata_setup_interrupt(dev, ata_generic_intr))
                return ENXIO;

        ctlr->ch_attach = ata_marvell_pata_ch_attach;
        ctlr->ch_detach = ata_pci_ch_detach;
        ctlr->setmode = ata_marvell_pata_setmode;
        ctlr->channels = ctlr->chip->cfg1;
        return 0;
}

static int
ata_marvell_pata_ch_attach(device_t dev)
{
        struct ata_channel *ch = device_get_softc(dev);

        /* set up the usual registers, normal PCI style */
        if (ata_pci_ch_attach(dev))
                return ENXIO;

        /* don't use 32 bit PIO transfers */
        ch->flags |= ATA_USE_16BIT;

        return 0;
}

static void
ata_marvell_pata_setmode(device_t dev, int mode)
{
        device_t gparent = GRANDPARENT(dev);
        struct ata_pci_controller *ctlr = device_get_softc(gparent);
        struct ata_device *atadev = device_get_softc(dev);

        mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);
        mode = ata_check_80pin(dev, mode);
        if (!ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
                atadev->mode = mode;
}

int
ata_marvell_edma_chipinit(device_t dev)
{
        struct ata_pci_controller *ctlr = device_get_softc(dev);

        if (ata_setup_interrupt(dev, ata_generic_intr))
                return ENXIO;

        ctlr->r_type1 = SYS_RES_MEMORY;
        ctlr->r_rid1 = PCIR_BAR(0);
        if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
            &ctlr->r_rid1, RF_ACTIVE)))
                return ENXIO;

        /* mask all host controller interrupts */
        ATA_OUTL(ctlr->r_res1, 0x01d64, 0x00000000);

        /* mask all PCI interrupts */
        ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x00000000);

        ctlr->ch_attach = ata_marvell_edma_ch_attach;
        ctlr->ch_detach = ata_marvell_edma_ch_detach;
        ctlr->reset = ata_marvell_edma_reset;
        ctlr->setmode = ata_sata_setmode;
        ctlr->channels = ctlr->chip->cfg1;

        /* clear host controller interrupts */
        ATA_OUTL(ctlr->r_res1, 0x20014, 0x00000000);
        if (ctlr->chip->cfg1 > 4)
                ATA_OUTL(ctlr->r_res1, 0x30014, 0x00000000);

        /* clear PCI interrupts */
        ATA_OUTL(ctlr->r_res1, 0x01d58, 0x00000000);

        /* unmask PCI interrupts we want */
        ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x007fffff);

        /* unmask host controller interrupts we want */
        ATA_OUTL(ctlr->r_res1, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ |
            /*(1<<19) | (1<<20) | (1<<21) |*/ (1<<22) | (1<<24) | (0x7f << 25));

        return 0;
}

static int
ata_marvell_edma_ch_attach(device_t dev)
{
        struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
        struct ata_channel *ch = device_get_softc(dev);
        u_int64_t work;
        int i;

        ata_marvell_edma_dmainit(dev);
        work = ch->dma.work_bus;
        /* clear work area */
        bzero(ch->dma.work, 1024+256);

        /* set legacy ATA resources */
        for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
                ch->r_io[i].res = ctlr->r_res1;
                ch->r_io[i].offset = 0x02100 + (i << 2) + ATA_MV_EDMA_BASE(ch);
        }
        ch->r_io[ATA_CONTROL].res = ctlr->r_res1;
        ch->r_io[ATA_CONTROL].offset = 0x02120 + ATA_MV_EDMA_BASE(ch);
        ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res1;
        ata_default_registers(dev);

        /* set SATA resources */
        switch (ctlr->chip->cfg2) {
        case MV_50XX:
                ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
                ch->r_io[ATA_SSTATUS].offset = 0x00100 + ATA_MV_HOST_BASE(ch);
                ch->r_io[ATA_SERROR].res = ctlr->r_res1;
                ch->r_io[ATA_SERROR].offset = 0x00104 + ATA_MV_HOST_BASE(ch);
                ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
                ch->r_io[ATA_SCONTROL].offset = 0x00108 + ATA_MV_HOST_BASE(ch);
                break;
        case MV_60XX:
                ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
                ch->r_io[ATA_SSTATUS].offset = 0x02300 + ATA_MV_EDMA_BASE(ch);
                ch->r_io[ATA_SERROR].res = ctlr->r_res1;
                ch->r_io[ATA_SERROR].offset = 0x02304 + ATA_MV_EDMA_BASE(ch);
                ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
                ch->r_io[ATA_SCONTROL].offset = 0x02308 + ATA_MV_EDMA_BASE(ch);
                ch->r_io[ATA_SACTIVE].res = ctlr->r_res1;
                ch->r_io[ATA_SACTIVE].offset = 0x02350 + ATA_MV_EDMA_BASE(ch);
                break;
        }

        ch->flags |= ATA_NO_SLAVE;
        ch->flags |= ATA_USE_16BIT; /* XXX SOS needed ? */
        ata_generic_hw(dev);
        ch->hw.begin_transaction = ata_marvell_edma_begin_transaction;
        ch->hw.end_transaction = ata_marvell_edma_end_transaction;
        ch->hw.status = ata_marvell_edma_status;

        /* disable the EDMA machinery */
        ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
        DELAY(100000);  /* SOS should poll for disabled */

        /* set configuration to non-queued, 128b read transfers, stop on error */
        ATA_OUTL(ctlr->r_res1, 0x02000 + ATA_MV_EDMA_BASE(ch), (1<<11) | (1<<13));

        /* request queue base high */
        ATA_OUTL(ctlr->r_res1, 0x02010 + ATA_MV_EDMA_BASE(ch), work >> 32);

        /* request queue in ptr */
        ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

        /* request queue out ptr */
        ATA_OUTL(ctlr->r_res1, 0x02018 + ATA_MV_EDMA_BASE(ch), 0x0);

        /* response queue base high */
        work += 1024;
        ATA_OUTL(ctlr->r_res1, 0x0201c + ATA_MV_EDMA_BASE(ch), work >> 32);

        /* response queue in ptr */
        ATA_OUTL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch), 0x0);

        /* response queue out ptr */
        ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

        /* clear SATA error register */
        ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

        /* clear any outstanding error interrupts */
        ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

        /* unmask all error interrupts */
        ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

        /* enable EDMA machinery */
        ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
        return 0;
}

static int
ata_marvell_edma_ch_detach(device_t dev)
{

        ata_dmafini(dev);
        return (0);
}

static int
ata_marvell_edma_status(device_t dev)
{
        struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
        struct ata_channel *ch = device_get_softc(dev);
        u_int32_t cause = ATA_INL(ctlr->r_res1, 0x01d60);
        int shift = (ch->unit << 1) + (ch->unit > 3);

        if (cause & (1 << shift)) {

                /* clear interrupt(s) */
                ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

                /* do we have any PHY events ? */
                ata_sata_phy_check_events(dev);
        }

        /* do we have any device action ? */
        return (cause & (2 << shift));
}

/* must be called with ATA channel locked and state_mtx held */
static int
ata_marvell_edma_begin_transaction(struct ata_request *request)
{
        struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
        struct ata_channel *ch = device_get_softc(request->parent);
        u_int32_t req_in;
        u_int8_t *bytep;
        u_int16_t *wordp;
        u_int32_t *quadp;
        int i;
        int error, slot;

        /* only DMA R/W goes through the EDMA machine */
        if (request->u.ata.command != ATA_READ_DMA &&
            request->u.ata.command != ATA_WRITE_DMA) {

                /* disable the EDMA machinery */
                if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)
                        ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
                return ata_begin_transaction(request);
        }

        /* check for 48 bit access and convert if needed */
        ata_modify_if_48bit(request);

        /* check sanity, setup SG list and DMA engine */
        if ((error = ch->dma.load(request, NULL, NULL))) {
                device_printf(request->dev, "setting up DMA failed\n");
                request->result = error;
                return ATA_OP_FINISHED;
        }

        /* get next free request queue slot */
        req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch));
        slot = (((req_in & ~0xfffffc00) >> 5) + 0) & 0x1f;
        bytep = (u_int8_t *)(ch->dma.work);
        bytep += (slot << 5);
        wordp = (u_int16_t *)bytep;
        quadp = (u_int32_t *)bytep;

        /* fill in this request */
        quadp[0] = (long)request->dma->sg_bus & 0xffffffff;
        quadp[1] = (u_int64_t)request->dma->sg_bus >> 32;
        wordp[4] = (request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag<<1);

        i = 10;
        bytep[i++] = (request->u.ata.count >> 8) & 0xff;
        bytep[i++] = 0x10 | ATA_COUNT;
        bytep[i++] = request->u.ata.count & 0xff;
        bytep[i++] = 0x10 | ATA_COUNT;

        bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
        bytep[i++] = 0x10 | ATA_SECTOR;
        bytep[i++] = request->u.ata.lba & 0xff;
        bytep[i++] = 0x10 | ATA_SECTOR;

        bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
        bytep[i++] = 0x10 | ATA_CYL_LSB;
        bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
        bytep[i++] = 0x10 | ATA_CYL_LSB;

        bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
        bytep[i++] = 0x10 | ATA_CYL_MSB;
        bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
        bytep[i++] = 0x10 | ATA_CYL_MSB;

        bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0xf);
        bytep[i++] = 0x10 | ATA_DRIVE;

        bytep[i++] = request->u.ata.command;
        bytep[i++] = 0x90 | ATA_COMMAND;

        /* enable EDMA machinery if needed */
        if (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) {
                ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
                while (!(ATA_INL(ctlr->r_res1,
                    0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
                        DELAY(10);
        }

        /* tell EDMA it has a new request */
        slot = (((req_in & ~0xfffffc00) >> 5) + 1) & 0x1f;
        req_in &= 0xfffffc00;
        req_in += (slot << 5);
        ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in);

        return ATA_OP_CONTINUES;
}

/* must be called with ATA channel locked and state_mtx held */
static int
ata_marvell_edma_end_transaction(struct ata_request *request)
{
        struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
        struct ata_channel *ch = device_get_softc(request->parent);
        int offset = (ch->unit > 3 ? 0x30014 : 0x20014);
        u_int32_t icr = ATA_INL(ctlr->r_res1, offset);
        int res;

        /* EDMA interrupt */
        if ((icr & (0x0001 << (ch->unit & 3)))) {
                struct ata_marvell_response *response;
                u_int32_t rsp_in, rsp_out;
                int slot;

                /* stop timeout */
                callout_stop(&request->callout);

                /* get response pointers */
                rsp_in = ATA_INL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch));
                rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch));
                slot = (((rsp_in & ~0xffffff00) >> 3)) & 0x1f;
                rsp_out &= 0xffffff00;
                rsp_out += (slot << 3);
                response = (struct ata_marvell_response *)
                    (ch->dma.work + 1024 + (slot << 3));

                /* record status for this request */
                request->status = response->dev_status;
                request->error = 0;

                /* ack response */
                ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out);

                /* update progress */
                if (!(request->status & ATA_S_ERROR) &&
                    !(request->flags & ATA_R_TIMEOUT))
                        request->donecount = request->bytecount;

                /* unload SG list */
                ch->dma.unload(request);

                res = ATA_OP_FINISHED;
        }

        /* legacy ATA interrupt */
        else {
                res = ata_end_transaction(request);
        }

        /* ack interrupt */
        ATA_OUTL(ctlr->r_res1, offset, ~(icr & (0x0101 << (ch->unit & 3))));
        return res;
}

static void
ata_marvell_edma_reset(device_t dev)
{
        struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
        struct ata_channel *ch = device_get_softc(dev);

        /* disable the EDMA machinery */
        ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
        while ((ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
                DELAY(10);

        /* clear SATA error register */
        ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

        /* clear any outstanding error interrupts */
        ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

        /* unmask all error interrupts */
        ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

        /* enable channel and test for devices */
        if (ata_sata_phy_reset(dev, -1, 1))
                ata_generic_reset(dev);

        /* enable EDMA machinery */
        ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
}

static void
ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs,
    int error)
{
        struct ata_dmasetprd_args *args = xsc;
        struct ata_marvell_dma_prdentry *prd = args->dmatab;
        int i;

        if ((args->error = error))
                return;

        for (i = 0; i < nsegs; i++) {
                prd[i].addrlo = htole32(segs[i].ds_addr);
                prd[i].count = htole32(segs[i].ds_len);
                prd[i].addrhi = htole32((u_int64_t)segs[i].ds_addr >> 32);
        }
        prd[i - 1].count |= htole32(ATA_DMA_EOT);
        KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
        args->nsegs = nsegs;
}

static void
ata_marvell_edma_dmainit(device_t dev)
{
        struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
        struct ata_channel *ch = device_get_softc(dev);

        ata_dmainit(dev);
        /* note start and stop are not used here */
        ch->dma.setprd = ata_marvell_edma_dmasetprd;

        /* if 64bit support present adjust max address used */
        if (ATA_INL(ctlr->r_res1, 0x00d00) & 0x00000004)
                ch->dma.max_address = BUS_SPACE_MAXADDR;

        /* chip does not reliably do 64K DMA transfers */
        ch->dma.max_iosize = 64 * DEV_BSIZE;
}

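/*
 * Note on the per-channel DMA work area used above (inferred from the
 * offsets in ch_attach and the begin/end transaction handlers, not from a
 * datasheet): the EDMA request queue lives at offset 0 as 32 slots of 32
 * bytes each (hence the "slot << 5" indexing), followed at offset 1024 by
 * the response queue of 32 slots of 8 bytes each ("slot << 3"), together
 * matching the bzero(ch->dma.work, 1024+256) in ch_attach.
 */
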
ATA_DECLARE_DRIVER(ata_marvell);