xref: /freebsd/sys/dev/ata/chipsets/ata-marvell.c (revision 066f913a94b134b6d5e32b6af88f297c7da9c031)
1 /*-
2  * Copyright (c) 1998 - 2008 Søren Schmidt <sos@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26 
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29 
30 #include "opt_ata.h"
31 #include <sys/param.h>
32 #include <sys/module.h>
33 #include <sys/systm.h>
34 #include <sys/kernel.h>
35 #include <sys/ata.h>
36 #include <sys/bus.h>
37 #include <sys/endian.h>
38 #include <sys/malloc.h>
39 #include <sys/lock.h>
40 #include <sys/mutex.h>
41 #include <sys/sema.h>
42 #include <sys/taskqueue.h>
43 #include <vm/uma.h>
44 #include <machine/stdarg.h>
45 #include <machine/resource.h>
46 #include <machine/bus.h>
47 #include <sys/rman.h>
48 #include <dev/pci/pcivar.h>
49 #include <dev/pci/pcireg.h>
50 #include <dev/ata/ata-all.h>
51 #include <dev/ata/ata-pci.h>
52 #include <ata_if.h>
53 
54 /* local prototypes */
55 static int ata_marvell_chipinit(device_t dev);
56 static int ata_marvell_ch_attach(device_t dev);
57 static int ata_marvell_setmode(device_t dev, int target, int mode);
58 static int ata_marvell_edma_ch_attach(device_t dev);
59 static int ata_marvell_edma_ch_detach(device_t dev);
60 static int ata_marvell_edma_status(device_t dev);
61 static int ata_marvell_edma_begin_transaction(struct ata_request *request);
62 static int ata_marvell_edma_end_transaction(struct ata_request *request);
63 static void ata_marvell_edma_reset(device_t dev);
64 static void ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
65 static void ata_marvell_edma_dmainit(device_t dev);
66 
/* misc defines: chip family codes kept in ata_chip_id cfg2 */
#define MV_50XX		50	/* SATA 1.5Gb/s parts (88SX504x/508x) */
#define MV_60XX		60	/* SATA 3Gb/s parts */
#define MV_6042		62	/* SATA 3Gb/s, uses the alternate EDMA CRQB format */
#define MV_7042		72	/* like 6042 */
#define MV_61XX		61	/* PATA (UDMA6) parts */


/*
 * Marvell chipset support functions
 *
 * Per-channel register window bases: channels 0-3 live under the first
 * host bridge at 0x20000, channels 4-7 under the second at 0x30000.
 * Arguments and expansions are fully parenthesized so the macros are
 * safe in any arithmetic context (the originals leaked a bare trailing
 * `+ term` out of the expansion).
 */
#define ATA_MV_HOST_BASE(ch) \
	((((ch)->unit & 3) * 0x0100) + ((ch)->unit > 3 ? 0x30000 : 0x20000))
#define ATA_MV_EDMA_BASE(ch) \
	((((ch)->unit & 3) * 0x2000) + ((ch)->unit > 3 ? 0x30000 : 0x20000))
82 
/* One EDMA response-queue entry (8 bytes); layout is fixed by the chip. */
struct ata_marvell_response {
    u_int16_t   tag;		/* echo of the request tag */
    u_int8_t    edma_status;	/* EDMA completion status */
    u_int8_t    dev_status;	/* ATA device status register image */
    u_int32_t   timestamp;	/* completion timestamp from the chip */
};
89 
/* One scatter/gather (PRD) entry of the EDMA engine's SG table. */
struct ata_marvell_dma_prdentry {
    u_int32_t addrlo;		/* segment bus address, low 32 bits */
    u_int32_t count;		/* byte count; ATA_DMA_EOT marks the last entry */
    u_int32_t addrhi;		/* segment bus address, high 32 bits */
    u_int32_t reserved;
};
96 
/*
 * Probe for a supported Marvell controller, set the device description
 * and select the chipinit method: the SATA families (50XX/60XX/6042/
 * 7042) use the EDMA engine, the PATA 61XX family the legacy path.
 */
static int
ata_marvell_probe(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);
    /* Table fields: chipid, chiprev, cfg1, cfg2 (family), max_dma, desc.
     * cfg1 is the channel count on EDMA parts and, on 61XX, doubles as
     * the "has an AHCI half" flag (see ata_marvell_chipinit). */
    static struct ata_chip_id ids[] =
    {{ ATA_M88SX5040, 0, 4, MV_50XX, ATA_SA150, "88SX5040" },
     { ATA_M88SX5041, 0, 4, MV_50XX, ATA_SA150, "88SX5041" },
     { ATA_M88SX5080, 0, 8, MV_50XX, ATA_SA150, "88SX5080" },
     { ATA_M88SX5081, 0, 8, MV_50XX, ATA_SA150, "88SX5081" },
     { ATA_M88SX6041, 0, 4, MV_60XX, ATA_SA300, "88SX6041" },
     { ATA_M88SX6042, 0, 4, MV_6042, ATA_SA300, "88SX6042" },
     { ATA_M88SX6081, 0, 8, MV_60XX, ATA_SA300, "88SX6081" },
     { ATA_M88SX7042, 0, 4, MV_7042, ATA_SA300, "88SX7042" },
     { ATA_M88SX6101, 0, 0, MV_61XX, ATA_UDMA6, "88SX6101" },
     { ATA_M88SX6102, 0, 0, MV_61XX, ATA_UDMA6, "88SX6102" },
     { ATA_M88SX6111, 0, 1, MV_61XX, ATA_UDMA6, "88SX6111" },
     { ATA_M88SX6121, 0, 2, MV_61XX, ATA_UDMA6, "88SX6121" },
     { ATA_M88SX6141, 0, 4, MV_61XX, ATA_UDMA6, "88SX6141" },
     { ATA_M88SX6145, 0, 4, MV_61XX, ATA_UDMA6, "88SX6145" },
     { 0, 0, 0, 0, 0, 0}};

    if (pci_get_vendor(dev) != ATA_MARVELL_ID)
	return ENXIO;

    if (!(ctlr->chip = ata_match_chip(dev, ids)))
	return ENXIO;

    ata_set_desc(dev);

    /* Select the init routine by chip family. */
    switch (ctlr->chip->cfg2) {
    case MV_50XX:
    case MV_60XX:
    case MV_6042:
    case MV_7042:
	ctlr->chipinit = ata_marvell_edma_chipinit;
	break;
    case MV_61XX:
	ctlr->chipinit = ata_marvell_chipinit;
	break;
    }
    return (BUS_PROBE_DEFAULT);
}
139 
140 static int
141 ata_marvell_chipinit(device_t dev)
142 {
143 	struct ata_pci_controller *ctlr = device_get_softc(dev);
144 	device_t child;
145 
146 	if (ata_setup_interrupt(dev, ata_generic_intr))
147 		return ENXIO;
148 	/* Create AHCI subdevice if AHCI part present. */
149 	if (ctlr->chip->cfg1) {
150 	    	child = device_add_child(dev, NULL, -1);
151 		if (child != NULL) {
152 		    device_set_ivars(child, (void *)(intptr_t)-1);
153 		    bus_generic_attach(dev);
154 		}
155 	}
156         ctlr->ch_attach = ata_marvell_ch_attach;
157 	ctlr->ch_detach = ata_pci_ch_detach;
158 	ctlr->reset = ata_generic_reset;
159         ctlr->setmode = ata_marvell_setmode;
160         ctlr->channels = 1;
161         return (0);
162 }
163 
164 static int
165 ata_marvell_ch_attach(device_t dev)
166 {
167 	struct ata_channel *ch = device_get_softc(dev);
168 	int error;
169 
170 	error = ata_pci_ch_attach(dev);
171     	/* dont use 32 bit PIO transfers */
172 	ch->flags |= ATA_USE_16BIT;
173 	ch->flags |= ATA_CHECKS_CABLE;
174 	return (error);
175 }
176 
177 static int
178 ata_marvell_setmode(device_t dev, int target, int mode)
179 {
180 	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
181 	struct ata_channel *ch = device_get_softc(dev);
182 
183 	mode = min(mode, ctlr->chip->max_dma);
184 	/* Check for 80pin cable present. */
185 	if (mode > ATA_UDMA2 && ATA_IDX_INB(ch, ATA_BMDEVSPEC_0) & 0x01) {
186 		ata_print_cable(dev, "controller");
187 		mode = ATA_UDMA2;
188 	}
189 	/* Nothing to do to setup mode, the controller snoop SET_FEATURE cmd. */
190 	return (mode);
191 }
192 
/*
 * Controller-level init for the EDMA-capable SATA families: map the
 * register BAR, quiesce all interrupt sources, install the EDMA
 * channel methods, then unmask only the interrupts we service.
 * Not static: referenced from outside this file.
 */
int
ata_marvell_edma_chipinit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(dev);

    if (ata_setup_interrupt(dev, ata_generic_intr))
	return ENXIO;

    /* all chip registers live in a single memory BAR */
    ctlr->r_type1 = SYS_RES_MEMORY;
    ctlr->r_rid1 = PCIR_BAR(0);
    if (!(ctlr->r_res1 = bus_alloc_resource_any(dev, ctlr->r_type1,
						&ctlr->r_rid1, RF_ACTIVE)))
	return ENXIO;

    /* mask all host controller interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d64, 0x00000000);

    /* mask all PCI interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x00000000);

    ctlr->ch_attach = ata_marvell_edma_ch_attach;
    ctlr->ch_detach = ata_marvell_edma_ch_detach;
    ctlr->reset = ata_marvell_edma_reset;
    ctlr->setmode = ata_sata_setmode;
    ctlr->getrev = ata_sata_getrev;
    ctlr->channels = ctlr->chip->cfg1;

    /* clear host controller interrupts (second bridge on 8-ch parts) */
    ATA_OUTL(ctlr->r_res1, 0x20014, 0x00000000);
    if (ctlr->chip->cfg1 > 4)
	ATA_OUTL(ctlr->r_res1, 0x30014, 0x00000000);

    /* clear PCI interrupts */
    ATA_OUTL(ctlr->r_res1, 0x01d58, 0x00000000);

    /* unmask PCI interrupts we want */
    ATA_OUTL(ctlr->r_res1, 0x01d5c, 0x007fffff);

    /* unmask host controller interrupts we want */
    ATA_OUTL(ctlr->r_res1, 0x01d64, 0x000000ff/*HC0*/ | 0x0001fe00/*HC1*/ |
	     /*(1<<19) | (1<<20) | (1<<21) |*/(1<<22) | (1<<24) | (0x7f << 25));

    return 0;
}
237 
/*
 * Attach an EDMA channel: set up the DMA work area, map the legacy
 * ATA shadow registers and the SATA interface registers into the
 * chip's per-channel window, hook the EDMA transaction handlers and
 * program the request/response queue registers.
 *
 * Work-area layout (see the queue programming below):
 *   offset    0: request queue,  1024 bytes (32 slots * 32 bytes)
 *   offset 1024: response queue,  256 bytes (32 slots *  8 bytes)
 */
static int
ata_marvell_edma_ch_attach(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    u_int64_t work;
    int i;

    ata_marvell_edma_dmainit(dev);
    work = ch->dma.work_bus;
    /* clear work area (request + response queues) */
    bzero(ch->dma.work, 1024+256);
    bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* set legacy ATA resources (shadow registers, 4 bytes apart) */
    for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
	ch->r_io[i].res = ctlr->r_res1;
	ch->r_io[i].offset = 0x02100 + (i << 2) + ATA_MV_EDMA_BASE(ch);
    }
    ch->r_io[ATA_CONTROL].res = ctlr->r_res1;
    ch->r_io[ATA_CONTROL].offset = 0x02120 + ATA_MV_EDMA_BASE(ch);
    ch->r_io[ATA_IDX_ADDR].res = ctlr->r_res1;
    ata_default_registers(dev);

    /* set SATA resources; register location differs between the 50XX
     * (host-bridge window) and the later (EDMA window) families */
    switch (ctlr->chip->cfg2) {
    case MV_50XX:
	ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
	ch->r_io[ATA_SSTATUS].offset =  0x00100 + ATA_MV_HOST_BASE(ch);
	ch->r_io[ATA_SERROR].res = ctlr->r_res1;
	ch->r_io[ATA_SERROR].offset = 0x00104 + ATA_MV_HOST_BASE(ch);
	ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
	ch->r_io[ATA_SCONTROL].offset = 0x00108 + ATA_MV_HOST_BASE(ch);
	break;
    case MV_60XX:
    case MV_6042:
    case MV_7042:
	ch->r_io[ATA_SSTATUS].res = ctlr->r_res1;
	ch->r_io[ATA_SSTATUS].offset =  0x02300 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SERROR].res = ctlr->r_res1;
	ch->r_io[ATA_SERROR].offset = 0x02304 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SCONTROL].res = ctlr->r_res1;
	ch->r_io[ATA_SCONTROL].offset = 0x02308 + ATA_MV_EDMA_BASE(ch);
	ch->r_io[ATA_SACTIVE].res = ctlr->r_res1;
	ch->r_io[ATA_SACTIVE].offset = 0x02350 + ATA_MV_EDMA_BASE(ch);
	break;
    }

    ch->flags |= ATA_NO_SLAVE;	/* point-to-point SATA link */
    ch->flags |= ATA_USE_16BIT; /* XXX SOS needed ? */
    ch->flags |= ATA_SATA;
    ata_generic_hw(dev);
    ch->hw.begin_transaction = ata_marvell_edma_begin_transaction;
    ch->hw.end_transaction = ata_marvell_edma_end_transaction;
    ch->hw.status = ata_marvell_edma_status;

    /* disable the EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
    DELAY(100000);       /* SOS should poll for disabled */

    /* set configuration to non-queued 128b read transfers stop on error */
    ATA_OUTL(ctlr->r_res1, 0x02000 + ATA_MV_EDMA_BASE(ch), (1<<11) | (1<<13));

    /* request queue base high */
    ATA_OUTL(ctlr->r_res1, 0x02010 + ATA_MV_EDMA_BASE(ch), work >> 32);

    /* request queue in ptr (doubles as queue base low) */
    ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

    /* request queue out ptr */
    ATA_OUTL(ctlr->r_res1, 0x02018 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* response queue base high (response queue starts 1K into the work area) */
    work += 1024;
    ATA_OUTL(ctlr->r_res1, 0x0201c + ATA_MV_EDMA_BASE(ch), work >> 32);

    /* response queue in ptr */
    ATA_OUTL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* response queue out ptr (doubles as queue base low) */
    ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), work & 0xffffffff);

    /* clear SATA error register (write back what was read) */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* clear any outstanding error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* unmask all error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

    /* enable EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
    return 0;
}
334 
335 static int
336 ata_marvell_edma_ch_detach(device_t dev)
337 {
338     struct ata_channel *ch = device_get_softc(dev);
339 
340     if (ch->dma.work_tag && ch->dma.work_map)
341 	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
342 	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
343     ata_dmafini(dev);
344     return (0);
345 }
346 
/*
 * Per-channel interrupt status check.  The main interrupt cause
 * register (0x01d60) carries two bits per channel, at shift 2*unit
 * for the first host bridge and 2*unit+1 for the second (units 4-7):
 * the low bit is serviced here by clearing the channel's EDMA error
 * cause register and checking for PHY events; the next bit (device
 * action) is returned to the caller.
 */
static int
ata_marvell_edma_status(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);
    u_int32_t cause = ATA_INL(ctlr->r_res1, 0x01d60);
    int shift = (ch->unit << 1) + (ch->unit > 3);

    if (cause & (1 << shift)) {

	/* clear interrupt(s) in the EDMA error cause register */
	ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

	/* do we have any PHY events ? */
	ata_sata_phy_check_events(dev);
    }

    /* do we have any device action ? */
    return (cause & (2 << shift));
}
367 
/*
 * Queue a request on the channel.  DMA read/write commands are turned
 * into an EDMA request-queue entry (CRQB); everything else bypasses
 * the EDMA engine and goes through the legacy path.
 *
 * Must be called with ATA channel locked and state_mtx held.
 */
static int
ata_marvell_edma_begin_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr=device_get_softc(device_get_parent(request->parent));
    struct ata_channel *ch = device_get_softc(request->parent);
    u_int32_t req_in;
    u_int8_t *bytep;
    int i;
    int error, slot;

    /* only DMA R/W goes through the EDMA machine */
    if (request->u.ata.command != ATA_READ_DMA &&
	request->u.ata.command != ATA_WRITE_DMA &&
	request->u.ata.command != ATA_READ_DMA48 &&
	request->u.ata.command != ATA_WRITE_DMA48) {

	/* disable the EDMA machinery (if running) for legacy access */
	if (ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)
	    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
	return ata_begin_transaction(request);
    }

    /* check sanity, setup SG list and DMA engine */
    if ((error = ch->dma.load(request, NULL, NULL))) {
	device_printf(request->parent, "setting up DMA failed\n");
	request->result = error;
	return ATA_OP_FINISHED;
    }

    /* get next free request queue slot: the queue holds 32 slots of
     * 32 bytes each, the slot index sits in bits 5..9 of the queue
     * in-pointer register */
    req_in = ATA_INL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch));
    slot = (((req_in & ~0xfffffc00) >> 5) + 0) & 0x1f;
    bytep = (u_int8_t *)(ch->dma.work);
    bytep += (slot << 5);

    /* fill in this request: SG table bus address first (64 bit, LE) */
    le32enc(bytep + 0 * sizeof(u_int32_t),
	request->dma->sg_bus & 0xffffffff);
    le32enc(bytep + 1 * sizeof(u_int32_t),
	(u_int64_t)request->dma->sg_bus >> 32);
    if (ctlr->chip->cfg2 != MV_6042 && ctlr->chip->cfg2 != MV_7042) {
	    /* original CRQB format: flags word (direction + tag), then
	     * a list of (value, register-id) byte pairs.  NOTE(review):
	     * the 0x10/0x90 bits in the register byte appear to flag a
	     * register write, 0x80 the final pair — confirm against the
	     * Marvell EDMA datasheet. */
	    le16enc(bytep + 4 * sizeof(u_int16_t),
		(request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag << 1));

	    i = 10;
	    /* 48-bit fields: high-order byte is written before the
	     * low-order byte of each register */
	    bytep[i++] = (request->u.ata.count >> 8) & 0xff;
	    bytep[i++] = 0x10 | ATA_COUNT;
	    bytep[i++] = request->u.ata.count & 0xff;
	    bytep[i++] = 0x10 | ATA_COUNT;

	    bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
	    bytep[i++] = 0x10 | ATA_SECTOR;
	    bytep[i++] = request->u.ata.lba & 0xff;
	    bytep[i++] = 0x10 | ATA_SECTOR;

	    bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
	    bytep[i++] = 0x10 | ATA_CYL_LSB;
	    bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
	    bytep[i++] = 0x10 | ATA_CYL_LSB;

	    bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
	    bytep[i++] = 0x10 | ATA_CYL_MSB;
	    bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
	    bytep[i++] = 0x10 | ATA_CYL_MSB;

	    bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0xf);
	    bytep[i++] = 0x10 | ATA_DRIVE;

	    bytep[i++] = request->u.ata.command;
	    bytep[i++] = 0x90 | ATA_COMMAND;
    } else {
	    /* 6042/7042 CRQB format: flags dword, then a fixed image of
	     * the ATA registers starting at byte 16 */
	    le32enc(bytep + 2 * sizeof(u_int32_t),
		(request->flags & ATA_R_READ ? 0x01 : 0x00) | (request->tag << 1));

	    i = 16;
	    bytep[i++] = 0;
	    bytep[i++] = 0;
	    bytep[i++] = request->u.ata.command;
	    bytep[i++] = request->u.ata.feature & 0xff;

	    bytep[i++] = request->u.ata.lba & 0xff;
	    bytep[i++] = (request->u.ata.lba >> 8) & 0xff;
	    bytep[i++] = (request->u.ata.lba >> 16) & 0xff;
	    bytep[i++] = ATA_D_LBA | ATA_D_IBM | ((request->u.ata.lba >> 24) & 0x0f);

	    bytep[i++] = (request->u.ata.lba >> 24) & 0xff;
	    bytep[i++] = (request->u.ata.lba >> 32) & 0xff;
	    bytep[i++] = (request->u.ata.lba >> 40) & 0xff;
	    bytep[i++] = (request->u.ata.feature >> 8) & 0xff;

	    bytep[i++] = request->u.ata.count & 0xff;
	    bytep[i++] = (request->u.ata.count >> 8) & 0xff;
	    bytep[i++] = 0;
	    bytep[i++] = 0;
    }

    /* make the CRQB visible to the device before ringing the doorbell */
    bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

    /* enable EDMA machinery if needed */
    if (!(ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001)) {
	ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
	while (!(ATA_INL(ctlr->r_res1,
			 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
	    DELAY(10);
    }

    /* tell EDMA it has a new request by advancing the in-pointer */
    slot = (((req_in & ~0xfffffc00) >> 5) + 1) & 0x1f;
    req_in &= 0xfffffc00;
    req_in += (slot << 5);
    ATA_OUTL(ctlr->r_res1, 0x02014 + ATA_MV_EDMA_BASE(ch), req_in);

    return ATA_OP_CONTINUES;
}
484 
/*
 * Complete a request.  An EDMA completion is read out of the response
 * queue (CRPB); anything else is handed to the legacy completion path.
 * In both cases the handled host-bridge cause bits are acked last.
 *
 * Must be called with ATA channel locked and state_mtx held.
 */
static int
ata_marvell_edma_end_transaction(struct ata_request *request)
{
    struct ata_pci_controller *ctlr=device_get_softc(device_get_parent(request->parent));
    struct ata_channel *ch = device_get_softc(request->parent);
    /* interrupt cause register of the host bridge this channel is on */
    int offset = (ch->unit > 3 ? 0x30014 : 0x20014);
    u_int32_t icr = ATA_INL(ctlr->r_res1, offset);
    int res;

    /* EDMA interrupt */
    if ((icr & (0x0001 << (ch->unit & 3)))) {
	struct ata_marvell_response *response;
	u_int32_t rsp_in, rsp_out;
	int slot;

	/* stop timeout */
	callout_stop(&request->callout);

	/* get response ptr's: 32 slots of 8 bytes, slot index in
	 * bits 3..7 of the queue in-pointer */
	rsp_in = ATA_INL(ctlr->r_res1, 0x02020 + ATA_MV_EDMA_BASE(ch));
	rsp_out = ATA_INL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch));
	slot = (((rsp_in & ~0xffffff00) >> 3)) & 0x1f;
	rsp_out &= 0xffffff00;
	rsp_out += (slot << 3);
	bus_dmamap_sync(ch->dma.work_tag, ch->dma.work_map,
	    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
	/* response queue starts 1K into the work area */
	response = (struct ata_marvell_response *)
		   (ch->dma.work + 1024 + (slot << 3));

	/* record status for this request */
	request->status = response->dev_status;
	request->error = 0;

	/* ack response by advancing the out-pointer */
	ATA_OUTL(ctlr->r_res1, 0x02024 + ATA_MV_EDMA_BASE(ch), rsp_out);

	/* update progress */
	if (!(request->status & ATA_S_ERROR) &&
	    !(request->flags & ATA_R_TIMEOUT))
	    request->donecount = request->bytecount;

	/* unload SG list */
	ch->dma.unload(request);

	res = ATA_OP_FINISHED;
    }

    /* legacy ATA interrupt */
    else {
	res = ata_end_transaction(request);
    }

    /* ack interrupt: write back all bits except the cause bits we
     * handled (presumably write-0-to-clear semantics — verify against
     * the chip datasheet) */
    ATA_OUTL(ctlr->r_res1, offset, ~(icr & (0x0101 << (ch->unit & 3))));
    return res;
}
542 
/*
 * Channel hard reset: stop the EDMA engine, clear pending error state,
 * reset the PHY (falling back to a generic reset if a device answers),
 * then re-enable EDMA.
 */
static void
ata_marvell_edma_reset(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    /* disable the EDMA machinery and wait until it reports stopped */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000002);
    while ((ATA_INL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch)) & 0x00000001))
	DELAY(10);

    /* clear SATA error register (write back what was read) */
    ATA_IDX_OUTL(ch, ATA_SERROR, ATA_IDX_INL(ch, ATA_SERROR));

    /* clear any outstanding error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x02008 + ATA_MV_EDMA_BASE(ch), 0x0);

    /* unmask all error interrupts */
    ATA_OUTL(ctlr->r_res1, 0x0200c + ATA_MV_EDMA_BASE(ch), ~0x0);

    /* enable channel and test for devices */
    if (ata_sata_phy_reset(dev, -1, 1))
	ata_generic_reset(dev);

    /* enable EDMA machinery */
    ATA_OUTL(ctlr->r_res1, 0x02028 + ATA_MV_EDMA_BASE(ch), 0x00000001);
}
570 
571 static void
572 ata_marvell_edma_dmasetprd(void *xsc, bus_dma_segment_t *segs, int nsegs,
573 			   int error)
574 {
575     struct ata_dmasetprd_args *args = xsc;
576     struct ata_marvell_dma_prdentry *prd = args->dmatab;
577     int i;
578 
579     if ((args->error = error))
580 	return;
581 
582     for (i = 0; i < nsegs; i++) {
583 	prd[i].addrlo = htole32(segs[i].ds_addr);
584 	prd[i].count = htole32(segs[i].ds_len);
585 	prd[i].addrhi = htole32((u_int64_t)segs[i].ds_addr >> 32);
586 	prd[i].reserved = 0;
587     }
588     prd[i - 1].count |= htole32(ATA_DMA_EOT);
589     KASSERT(nsegs <= ATA_DMA_ENTRIES, ("too many DMA segment entries\n"));
590     args->nsegs = nsegs;
591 }
592 
/*
 * Per-channel DMA setup: generic ata_dmainit() plus the Marvell
 * PRD-format callback, 64-bit DMA addressing when the chip advertises
 * it, and a reduced maximum transfer size on the families with
 * unreliable 64K transfers.
 */
static void
ata_marvell_edma_dmainit(device_t dev)
{
    struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
    struct ata_channel *ch = device_get_softc(dev);

    ata_dmainit(dev);
    /* note start and stop are not used here */
    ch->dma.setprd = ata_marvell_edma_dmasetprd;

    /* if 64bit support present adjust max address used */
    if (ATA_INL(ctlr->r_res1, 0x00d00) & 0x00000004)
	ch->dma.max_address = BUS_SPACE_MAXADDR;

    /* chip does not reliably do 64K DMA transfers */
    if (ctlr->chip->cfg2 == MV_50XX || ctlr->chip->cfg2 == MV_60XX)
	ch->dma.max_iosize = 64 * DEV_BSIZE;
}
611 
/* register this driver with the ata-pci bus front-end */
ATA_DECLARE_DRIVER(ata_marvell);
613