xref: /freebsd/sys/dev/ata/chipsets/ata-marvell.c (revision 6c6c03be2ddb04c54e455122799923deaefa4114)
1 /*-
2  * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ata.h"
31 #include <sys/param.h>
32 #include <sys/module.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/ata.h>
36 #include <sys/bus.h>
37 #include <sys/endian.h>
38 #include <sys/malloc.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/sema.h>
42 #include <sys/taskqueue.h>
43 #include <vm/uma.h>
44 #include <machine/stdarg.h>
45 #include <machine/resource.h>
46 #include <machine/bus.h>
47 #include <sys/rman.h>
48 #include <dev/pci/pcivar.h>
49 #include <dev/pci/pcireg.h>
50 #include <dev/ata/ata-all.h>
51 #include <dev/ata/ata-pci.h>
52 #include <ata_if.h>
53 
54 /* local prototypes */
55 static int ata_marvell_pata_chipinit(device_t dev);
56 static int ata_marvell_pata_allocate(device_t dev);
57 static void ata_marvell_pata_setmode(device_t dev, int mode);
58 static int ata_marvell_edma_allocate(device_t dev);
59 static int ata_marvell_edma_status(device_t dev);
60 static int ata_marvell_edma_begin_transaction(struct ata_request *request);
61 static int ata_marvell_edma_end_transaction(struct ata_request *request);
62 static void ata_marvell_edma_reset(device_t dev);
63 static void ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
64 static void ata_marvell_edma_dmainit(device_t dev);
65 
66 /* misc defines */
67 #define MV_50XX		50
68 #define MV_60XX		60
69 #define MV_61XX		61
70 
71 
72 /*
73  * Marvell chipset support functions
74  */
75 #define ATA_MV_HOST_BASE(ch) \
76 	((ch->unit & 3) * 0x0100) + (ch->unit > 3 ? 0x30000 : 0x20000)
77 #define ATA_MV_EDMA_BASE(ch) \
78 	((ch->unit & 3) * 0x2000) + (ch->unit > 3 ? 0x30000 : 0x20000)
79 
80 struct ata_marvell_response {
81     u_int16_t   tag;
82     u_int8_t    edma_status;
83     u_int8_t    dev_status;
84     u_int32_t   timestamp;
85 };
86 
87 struct ata_marvell_dma_prdentry {
88     u_int32_t addrlo;
89     u_int32_t count;
90     u_int32_t addrhi;
91     u_int32_t reserved;
92 };
93 
94 static int
95 ata_marvell_probe(device_t dev)
96 {
97     struct ata_pci_controller *ctlr = device_get_softc(dev);
98     static struct ata_chip_id ids[] =
99     {{ ATA_M88SX5040, 0, 4, MV_50XX, ATA_SA150, "88SX5040" },
100      { ATA_M88SX5041, 0, 4, MV_50XX, ATA_SA150, "88SX5041" },
101      { ATA_M88SX5080, 0, 8, MV_50XX, ATA_SA150, "88SX5080" },
102      { ATA_M88SX5081, 0, 8, MV_50XX, ATA_SA150, "88SX5081" },
103      { ATA_M88SX6041, 0, 4, MV_60XX, ATA_SA300, "88SX6041" },
104      { ATA_M88SX6081, 0, 8, MV_60XX, ATA_SA300, "88SX6081" },
105      { ATA_M88SX6101, 0, 1, MV_61XX, ATA_UDMA6, "88SX6101" },
106      { ATA_M88SX6145, 0, 2, MV_61XX, ATA_UDMA6, "88SX6145" },
107      { 0, 0, 0, 0, 0, 0}};
108 
109     if (pci_get_vendor(dev) != ATA_MARVELL_ID)
110 	return ENXIO;
111 
112     if (!(ctlr->chip = ata_match_chip(dev, ids)))
113 	return ENXIO;
114 
115     ata_set_desc(dev);
116 
117     switch (ctlr->chip->cfg2) {
118     case MV_50XX:
119     case MV_60XX:
120 	ctlr->chipinit = ata_marvell_edma_chipinit;
121 	break;
122     case MV_61XX:
123 	ctlr->chipinit = ata_marvell_pata_chipinit;
124 	break;
125     }
126     return 0;
127 }
128 
129 static int
130 ata_marvell_pata_chipinit(device_t dev)
131 {
132     struct ata_pci_controller *ctlr = device_get_softc(dev);
133 
134     if (ata_setup_interrupt(dev, ata_generic_intr))
135 	return ENXIO;
136 
137     ctlr->allocate = ata_marvell_pata_allocate;
138     ctlr->setmode = ata_marvell_pata_setmode;
139     ctlr->channels = ctlr->chip->cfg1;
140     return 0;
141 }
142 
143 static int
144 ata_marvell_pata_allocate(device_t dev)
145 {
146     struct ata_channel *ch = device_get_softc(dev);
147 
148     /* setup the usual register normal pci style */
149     if (ata_pci_allocate(dev))
150 	return ENXIO;
151 
152     /* dont use 32 bit PIO transfers */
153 	ch->flags |= ATA_USE_16BIT;
154 
155     return 0;
156 }
157 
158 static void
159 ata_marvell_pata_setmode(device_t dev, int mode)
160 {
161     device_t gparent = GRANDPARENT(dev);
162     struct ata_pci_controller *ctlr = device_get_softc(gparent);
163     struct ata_device *atadev = device_get_softc(dev);
164 
165     mode = ata_limit_mode(dev, mode, ctlr->chip->max_dma);
166     mode = ata_check_80pin(dev, mode);
167     if (!ata_controlcmd(dev, ATA_SETFEATURES, ATA_SF_SETXFER, 0, mode))
168 	atadev->mode = mode;
169 }
170 
/*
 * Chip init for the EDMA-capable 50XX/60XX SATA parts.  Maps BAR(0)
 * (all chip registers live in that memory window), quiesces and then
 * selectively re-enables the chip's interrupt sources, and installs
 * the EDMA channel methods.  Register offsets below are raw chip
 * register addresses within the BAR(0) window.
 */
int
ata_marvell_edma_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev, ata_generic_intr))
	return ENXIO;

    /* map the single memory BAR holding the whole register file */
    ctlr->r_type1 = SYS_RES_MEMORY;
    ctlr->r_rid1 = PCIR_BAR(0);
    if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
						&ctlr->r_rid1, RF_ACTIVE)))
	return ENXIO;

    /* mask all host controller interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d64, 0x00000000);

    /* mask all PCI interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x00000000);

    /* install the EDMA channel methods */
    ctlr->allocate = ata_marvell_edma_allocate;
    ctlr->reset = ata_marvell_edma_reset;
    ctlr->dmainit = ata_marvell_edma_dmainit;
    ctlr->setmode = ata_sata_setmode;
    ctlr->channels = ctlr->chip->cfg1;

    /* clear host controller interrupts (second HC only on >4 port chips) */
    ATA_OUTL(ctlr->r_res1, 0x20014, 0x00000000);
    if (ctlr->chip->cfg1 > 4)
	ATA_OUTL(ctlr->r_res1, 0x30014, 0x00000000);

    /* clear PCI interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d58, 0x00000000);

    /* unmask PCI interrupts we want */
    ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x007fffff);

    /*
     * Unmask host controller interrupts we want: the per-port bits of
     * both host controllers plus assorted chip-level status bits.  The
     * commented-out bits were deliberately left disabled.
     */
    ATA_OUTL(ctlr->r_res1, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ |
	     /*(1<<19) | (1<<20) | (1<<21) |*/(1<<22) | (1<<24) | (0x7f << 25));

    /* enable PCI INTx by clearing the command register disable bit (0x0400) */
    pci_write_config(dev, PCIR_COMMAND,
		     pci_read_config(dev, PCIR_COMMAND, 2) & ~0x0400, 2);
    return 0;
}
217 
/*
 * Per-channel setup for the EDMA path.  Points the legacy ATA register
 * indexes at the channel's shadow registers inside BAR(0), sets the
 * SATA status/error/control register locations (which differ between
 * the 50XX and 60XX families), installs the EDMA transaction hooks,
 * and programs the EDMA request/response queues into the channel's DMA
 * work area (first 1024 bytes: request queue; next 256 bytes: response
 * queue — matches the bzero size below).
 */
static int
ata_marvell_edma_allocate(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    u_int64_t work = ch->dma.work_bus;	/* bus address of the work area */
    int i;

    /* clear work area (request + response queues) */
    bzero(ch->dma.work, 1024+256);

    /* set legacy ATA resources: shadow registers at 0x02100, 4 bytes apart */
    for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
	ch->r_io[i].res = ctlr->r_res1;
	ch->r_io[i].offset = 0x02100 + (i << 2) + ATA_MV_EDMA_BASE(ch);
    }
    ch->r_io[ATA_CONTROL].res = ctlr->r_res1;
    ch->r_io[ATA_CONTROL].offset = 0x02120 + ATA_MV_EDMA_BASE(ch);
    ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res1;
    ata_default_registers(dev);

    /* set SATA resources; register layout is family specific */
    switch (ctlr->chip->cfg2) {
    case MV_50XX:
	/* 50XX keeps the SATA interface registers in the host bridge window */
	ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
	ch->r_io[ATA_SSTATUS].offset =  0x00100 + ATA_MV_HOST_BASE(ch);
	ch->r_io[ATA_SERROR].res = ctlr->r_res1;
	ch->r_io[ATA_SERROR].offset = 0x00104 + ATA_MV_HOST_BASE(ch);
	ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
	ch->r_io[ATA_SCONTROL].offset = 0x00108 + ATA_MV_HOST_BASE(ch);
	break;
    case MV_60XX:
	/* 60XX has them in the per-port EDMA window, plus SActive */
	ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
	ch->r_io[ATA_SSTATUS].offset =  0x02300 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SERROR].res = ctlr->r_res1;
	ch->r_io[ATA_SERROR].offset = 0x02304 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
	ch->r_io[ATA_SCONTROL].offset = 0x02308 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SACTIVE].res = ctlr->r_res1;
	ch->r_io[ATA_SACTIVE].offset = 0x02350 + ATA_MV_EDMA_BASE(ch);
	break;
    }

    ch->flags |= ATA_NO_SLAVE;
    ch->flags |= ATA_USE_16BIT; /* XXX SOS needed ? */
    ata_generic_hw(dev);
    /* override the generic hooks with the EDMA-aware ones */
    ch->hw.begin_transaction = ata_marvell_edma_begin_transaction;
    ch->hw.end_transaction = ata_marvell_edma_end_transaction;
    ch->hw.status = ata_marvell_edma_status;

    /* disable the EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
    DELAY(100000);       /* SOS should poll for disabled */

    /* set configuration to non-queued 128b read transfers stop on error */
    ATA_OUTL(ctlr->r_res1, 0x02000 + ATA_MV_EDMA_BASE(ch), (1<<11) | (1<<13));

    /* request queue base high */
    ATA_OUTL(ctlr->r_res1, 0x02010 + ATA_MV_EDMA_BASE(ch), work >> 32);

    /* request queue in ptr (low 32 bits double as the queue base) */
    ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

    /* request queue out ptr */
    ATA_OUTL(ctlr->r_res1, 0x02018 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* response queue base high (response queue starts 1024 bytes in) */
    work += 1024;
    ATA_OUTL(ctlr->r_res1, 0x0201c + ATA_MV_EDMA_BASE(ch), work >> 32);

    /* response queue in ptr */
    ATA_OUTL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* response queue out ptr */
    ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

    /* clear SATA error register (write-1-to-clear) */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* clear any outstanding error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* unmask all error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

    /* enable EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
    return 0;
}
307 
308 static int
309 ata_marvell_edma_status(device_t dev)
310 {
311     struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
312     struct ata_channel *ch = device_get_softc(dev);
313     u_int32_t cause = ATA_INL(ctlr->r_res1, 0x01d60);
314     int shift = (ch->unit << 1) + (ch->unit > 3);
315 
316     if (cause & (1 << shift)) {
317 
318 	/* clear interrupt(s) */
319 	ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);
320 
321 	/* do we have any PHY events ? */
322 	ata_sata_phy_check_events(dev);
323     }
324 
325     /* do we have any device action ? */
326     return (cause & (2 << shift));
327 }
328 
/* must be called with ATA channel locked and state_mtx held */
/*
 * Start a request on an EDMA channel.  Plain DMA reads/writes are
 * queued through the EDMA request queue (each 32-byte slot holds a
 * CRQB: SG table pointer, flags/tag word, and the ATA register values
 * as (value, register-address) byte pairs); everything else bypasses
 * EDMA and goes through the legacy shadow registers.
 */
static int
ata_marvell_edma_begin_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
    struct ata_channel *ch = device_get_softc(request->parent);
    u_int32_t req_in;
    u_int8_t *bytep;
    u_int16_t *wordp;
    u_int32_t *quadp;
    int i;
    int error, slot;

    /* only DMA R/W goes through the EMDA machine */
    if (request->u.ata.command != ATA_READ_DMA &&
	request->u.ata.command != ATA_WRITE_DMA) {

	/* disable the EDMA machinery so the shadow registers work */
	if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)
	    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
	return ata_begin_transaction(request);
    }

    /* check for 48 bit access and convert if needed */
    ata_modify_if_48bit(request);

    /* check sanity, setup SG list and DMA engine */
    if ((error = ch->dma.load(request, NULL, NULL))) {
	device_printf(request->dev, "setting up DMA failed\n");
	request->result = error;
	return ATA_OP_FINISHED;
    }

    /*
     * Get next free request queue slot: the in pointer's low bits
     * encode the slot index (32 slots of 32 bytes each).
     */
    req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch));
    slot = (((req_in & ~0xfffffc00) >> 5) + 0) & 0x1f;
    bytep = (u_int8_t *)(ch->dma.work);
    bytep += (slot << 5);
    wordp = (u_int16_t *)bytep;
    quadp = (u_int32_t *)bytep;

    /*
     * Fill in this request: SG table bus address (lo/hi), then the
     * direction flag and command tag.
     * NOTE(review): the CRQB words are stored in host byte order with
     * no htole32/htole16 conversion — presumably only correct on
     * little-endian hosts; verify before use on big-endian platforms.
     */
    quadp[0] = (long)request->dma->sg_bus & 0xffffffff;
    quadp[1] = (u_int64_t)request->dma->sg_bus >> 32;
    wordp[4] = (request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag<<1);

    /*
     * ATA register programming area starts at byte 10.  Each entry is
     * a (value, 0x10 | register-address) byte pair; for 48 bit LBA the
     * high-order byte of each register is written first.
     */
    i = 10;
    bytep[i++] = (request->u.ata.count >> 8) & 0xff;
    bytep[i++] = 0x10 | ATA_COUNT;
    bytep[i++] = request->u.ata.count & 0xff;
    bytep[i++] = 0x10 | ATA_COUNT;

    bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
    bytep[i++] = 0x10 | ATA_SECTOR;
    bytep[i++] = request->u.ata.lba & 0xff;
    bytep[i++] = 0x10 | ATA_SECTOR;

    bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
    bytep[i++] = 0x10 | ATA_CYL_LSB;
    bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
    bytep[i++] = 0x10 | ATA_CYL_LSB;

    bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
    bytep[i++] = 0x10 | ATA_CYL_MSB;
    bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
    bytep[i++] = 0x10 | ATA_CYL_MSB;

    bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0xf);
    bytep[i++] = 0x10 | ATA_DRIVE;

    /* the command entry is marked 0x90 — presumably the "last entry" flag */
    bytep[i++] = request->u.ata.command;
    bytep[i++] = 0x90 | ATA_COMMAND;

    /* enable EDMA machinery if needed, and wait until it reports enabled */
    if (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) {
	ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
	while (!(ATA_INL(ctlr->r_res1,
			 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
	    DELAY(10);
    }

    /* tell EDMA it has a new request by advancing the in pointer one slot */
    slot = (((req_in & ~0xfffffc00) >> 5) + 1) & 0x1f;
    req_in &= 0xfffffc00;
    req_in += (slot << 5);
    ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in);

    return ATA_OP_CONTINUES;
}
418 
419 /* must be called with ATA channel locked and state_mtx held */
/* must be called with ATA channel locked and state_mtx held */
/*
 * Complete a request on an EDMA channel.  If the interrupt cause
 * register shows an EDMA completion for this port, the device status
 * is picked out of the 8-byte response queue entry and the response is
 * acked by advancing the out pointer; otherwise the interrupt is
 * treated as a legacy (non-EDMA) completion.
 */
static int
ata_marvell_edma_end_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr=device_get_softc(GRANDPARENT(request->dev));
    struct ata_channel *ch = device_get_softc(request->parent);
    /* host controller interrupt cause register: second HC for ports > 3 */
    int offset = (ch->unit > 3 ? 0x30014 : 0x20014);
    u_int32_t icr = ATA_INL(ctlr->r_res1, offset);
    int res;

    /* EDMA interrupt */
    if ((icr & (0x0001 << (ch->unit & 3)))) {
	struct ata_marvell_response *response;
	u_int32_t rsp_in, rsp_out;
	int slot;

	/* stop timeout */
	callout_stop(&request->callout);

	/* get response ptr's; the in pointer's low bits select the slot */
	rsp_in = ATA_INL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch));
	rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch));
	slot = (((rsp_in & ~0xffffff00) >> 3)) & 0x1f;
	rsp_out &= 0xffffff00;
	rsp_out += (slot << 3);
	/* response queue lives 1024 bytes into the work area */
	response = (struct ata_marvell_response *)
		   (ch->dma.work + 1024 + (slot << 3));

	/* record status for this request */
	request->status = response->dev_status;
	request->error = 0;

	/* ack response by writing back the advanced out pointer */
	ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out);

	/* update progress */
	if (!(request->status & ATA_S_ERROR) &&
	    !(request->flags & ATA_R_TIMEOUT))
	    request->donecount = request->bytecount;

	/* unload SG list */
	ch->dma.unload(request);

	res = ATA_OP_FINISHED;
    }

    /* legacy ATA interrupt */
    else {
	res = ata_end_transaction(request);
    }

    /* ack interrupt: clear only this port's cause bits (EDMA + legacy) */
    ATA_OUTL(ctlr->r_res1, offset, ~(icr & (0x0101 << (ch->unit & 3))));
    return res;
}
474 
/*
 * Reset an EDMA channel: stop the EDMA engine (polling until it
 * reports disabled), clear latched SATA and EDMA error state, reset
 * the PHY/device, then restart the EDMA engine.
 */
static void
ata_marvell_edma_reset(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    /* disable the EDMA machinery and wait for the enable bit to drop */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
    while ((ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
	DELAY(10);

    /* clear SATA error register (write-1-to-clear) */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* clear any outstanding error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* unmask all error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

    /* enable channel and test for devices; fall back to a generic reset */
    if (ata_sata_phy_reset(dev))
	ata_generic_reset(dev);

    /* enable EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
}
502 
503 static void
504 ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs,
505 			   int error)
506 {
507     struct ata_dmasetprd_args *args = xsc;
508     struct ata_marvell_dma_prdentry *prd = args->dmatab;
509     int i;
510 
511     if ((args->error = error))
512 	return;
513 
514     for (i = 0; i < nsegs; i++) {
515 	prd[i].addrlo = htole32(segs[i].ds_addr);
516 	prd[i].count = htole32(segs[i].ds_len);
517 	prd[i].addrhi = htole32((u_int64_t)segs[i].ds_addr >> 32);
518     }
519     prd[i - 1].count |= htole32(ATA_DMA_EOT);
520     KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
521     args->nsegs = nsegs;
522 }
523 
524 static void
525 ata_marvell_edma_dmainit(device_t dev)
526 {
527     struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
528     struct ata_channel *ch = device_get_softc(dev);
529 
530     ata_dmainit(dev);
531     /* note start and stop are not used here */
532     ch->dma.setprd = ata_marvell_edma_dmasetprd;
533 
534     /* if 64bit support present adjust max address used */
535     if (ATA_INL(ctlr->r_res1, 0x00d00) & 0x00000004)
536 	ch->dma.max_address = BUS_SPACE_MAXADDR;
537 
538     /* chip does not reliably do 64K DMA transfers */
539     ch->dma.max_iosize = 64 * DEV_BSIZE;
540 }
541 
/* register this driver with the ATA framework and newbus */
ATA_DECLARE_DRIVER(ata_marvell);
543