/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright 2008 by Nathan Whitehorn. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Common routines for the DMA engine on both the Apple Kauai and MacIO
 * ATA controllers.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/sema.h>
#include <sys/taskqueue.h>
#include <vm/uma.h>
#include <machine/stdarg.h>
#include <machine/resource.h>
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/ata.h>
#include <dev/ata/ata-all.h>
#include <dev/ata/ata-pci.h>
#include <ata_if.h>

#include "ata_dbdma.h"

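/*
 * Argument block handed to the bus_dmamap_load() callback below; the
 * callback reports the number of mapped segments back through 'nsegs'.
 */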
struct ata_dbdma_dmaload_args {
	struct ata_dbdma_channel *sc;

	int write;
	int nsegs;
};

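/*
 * bus_dmamap_load() callback: turn the scatter/gather list into DBDMA
 * INPUT/OUTPUT commands appended to the channel's circular command list,
 * terminate it with a fresh STOP, and overwrite the previous STOP with a
 * NOP so the engine can run into the new commands.
 */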
static void
ata_dbdma_setprd(void *xarg, bus_dma_segment_t *segs, int nsegs, int error)
{
	struct ata_dbdma_dmaload_args *arg = xarg;
	struct ata_dbdma_channel *sc = arg->sc;
	int branch_type, command;
	int prev_stop;
	int i;

	mtx_lock(&sc->dbdma_mtx);

	prev_stop = sc->next_dma_slot - 1;
	if (prev_stop < 0)
		prev_stop = 0xff;

	for (i = 0; i < nsegs; i++) {
		/* Loop back to the beginning if this is our last slot */
		if (sc->next_dma_slot == 0xff)
			branch_type = DBDMA_ALWAYS;
		else
			branch_type = DBDMA_NEVER;

		if (arg->write) {
			command = (i + 1 < nsegs) ? DBDMA_OUTPUT_MORE :
			    DBDMA_OUTPUT_LAST;
		} else {
			command = (i + 1 < nsegs) ? DBDMA_INPUT_MORE :
			    DBDMA_INPUT_LAST;
		}

		dbdma_insert_command(sc->dbdma, sc->next_dma_slot++,
		    command, 0, segs[i].ds_addr, segs[i].ds_len,
		    DBDMA_NEVER, branch_type, DBDMA_NEVER, 0);

		if (branch_type == DBDMA_ALWAYS)
			sc->next_dma_slot = 0;
	}

	/*
	 * We have a corner case where the STOP command is the last slot,
	 * but you can't branch in STOP commands. So add a NOP branch here
	 * and the STOP in slot 0.
	 */
	if (sc->next_dma_slot == 0xff) {
		dbdma_insert_branch(sc->dbdma, sc->next_dma_slot, 0);
		sc->next_dma_slot = 0;
	}

#if 0
	dbdma_insert_command(sc->dbdma, sc->next_dma_slot++,
	    DBDMA_NOP, 0, 0, 0, DBDMA_ALWAYS, DBDMA_NEVER, DBDMA_NEVER, 0);
#endif
	dbdma_insert_stop(sc->dbdma, sc->next_dma_slot++);
	dbdma_insert_nop(sc->dbdma, prev_stop);

	dbdma_sync_commands(sc->dbdma, BUS_DMASYNC_PREWRITE);

	mtx_unlock(&sc->dbdma_mtx);

	arg->nsegs = nsegs;
}

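/*
 * Completion poll. While a DMA transfer is active, completion is judged
 * from the DBDMA channel status; otherwise fall back to the ATA alternate
 * status register's BUSY bit.
 */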
static int
ata_dbdma_status(device_t dev)
{
	struct ata_dbdma_channel *sc = device_get_softc(dev);
	struct ata_channel *ch = device_get_softc(dev);

	if (sc->sc_ch.dma.flags & ATA_DMA_ACTIVE) {
		return (!(dbdma_get_chan_status(sc->dbdma) &
		    DBDMA_STATUS_ACTIVE));
	}

	if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY) {
		DELAY(100);
		if (ATA_IDX_INB(ch, ATA_ALTSTAT) & ATA_S_BUSY)
			return 0;
	}
	return 1;
}

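/*
 * Begin a prepared transfer by waking the DBDMA engine.
 */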
static int
ata_dbdma_start(struct ata_request *request)
{
	struct ata_dbdma_channel *sc = device_get_softc(request->parent);

	sc->sc_ch.dma.flags |= ATA_DMA_ACTIVE;
	dbdma_wake(sc->dbdma);
	return 0;
}

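/*
 * Stop the DBDMA engine and reset the command list to a single STOP in
 * slot 0.
 */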
static void
ata_dbdma_reset(device_t dev)
{
	struct ata_dbdma_channel *sc = device_get_softc(dev);

	mtx_lock(&sc->dbdma_mtx);

	dbdma_stop(sc->dbdma);
	dbdma_insert_stop(sc->dbdma, 0);
	sc->next_dma_slot = 1;
	dbdma_set_current_cmd(sc->dbdma, 0);

	sc->sc_ch.dma.flags &= ~ATA_DMA_ACTIVE;

	mtx_unlock(&sc->dbdma_mtx);
}

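/*
 * Pause the DBDMA engine at the end of a transfer and sanity-check the
 * channel status, resetting the channel if the engine reports itself dead.
 */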
static int
ata_dbdma_stop(struct ata_request *request)
{
	struct ata_dbdma_channel *sc = device_get_softc(request->parent);
	uint16_t status;

	status = dbdma_get_chan_status(sc->dbdma);

	dbdma_pause(sc->dbdma);
	sc->sc_ch.dma.flags &= ~ATA_DMA_ACTIVE;

	if (status & DBDMA_STATUS_DEAD) {
		device_printf(request->parent, "DBDMA dead, resetting "
		    "channel...\n");
		ata_dbdma_reset(request->parent);
		return ATA_S_ERROR;
	}

	if (!(status & DBDMA_STATUS_RUN)) {
		device_printf(request->parent, "DBDMA confused, stop called "
		    "when channel is not running!\n");
		return ATA_S_ERROR;
	}

	if (status & DBDMA_STATUS_ACTIVE) {
		device_printf(request->parent, "DBDMA channel stopped "
		    "prematurely\n");
		return ATA_S_ERROR;
	}
	return 0;
}

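/*
 * Validate a request's data buffer and map it for DMA; ata_dbdma_setprd()
 * fills in the DBDMA command list as the load callback.
 */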
static int
ata_dbdma_load(struct ata_request *request, void *addr, int *entries)
{
	struct ata_channel *ch = device_get_softc(request->parent);
	struct ata_dbdma_dmaload_args args;
	int error;

	args.sc = device_get_softc(request->parent);
	args.write = !(request->flags & ATA_R_READ);

	if (!request->bytecount) {
		device_printf(request->dev,
		    "FAILURE - zero length DMA transfer attempted\n");
		return EIO;
	}
	if (((uintptr_t)(request->data) & (ch->dma.alignment - 1)) ||
	    (request->bytecount & (ch->dma.alignment - 1))) {
		device_printf(request->dev,
		    "FAILURE - non aligned DMA transfer attempted\n");
		return EIO;
	}
	if (request->bytecount > ch->dma.max_iosize) {
		device_printf(request->dev,
		    "FAILURE - oversized DMA transfer attempt %d > %d\n",
		    request->bytecount, ch->dma.max_iosize);
		return EIO;
	}

	request->dma = &ch->dma.slot[0];

	if ((error = bus_dmamap_load(request->dma->data_tag,
	    request->dma->data_map, request->data, request->bytecount,
	    &ata_dbdma_setprd, &args, BUS_DMA_NOWAIT))) {
		device_printf(request->dev, "FAILURE - load data\n");
		goto error;
	}

	if (entries)
		*entries = args.nsegs;

	bus_dmamap_sync(request->dma->sg_tag, request->dma->sg_map,
	    BUS_DMASYNC_PREWRITE);
	bus_dmamap_sync(request->dma->data_tag, request->dma->data_map,
	    (request->flags & ATA_R_READ) ?
	    BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE);

	return 0;

error:
	ch->dma.unload(request);
	return EIO;
}

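/*
 * Per-channel DMA setup: allocate the DBDMA channel, prime its command
 * list with a STOP, and hook the DMA methods into the generic ATA layer.
 */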
void
ata_dbdma_dmainit(device_t dev)
{
	struct ata_dbdma_channel *sc = device_get_softc(dev);

	dbdma_allocate_channel(sc->dbdma_regs, sc->dbdma_offset,
	    bus_get_dma_tag(dev), 256, &sc->dbdma);

	dbdma_set_wait_selector(sc->dbdma, 1 << 7, 1 << 7);

	dbdma_insert_stop(sc->dbdma, 0);
	sc->next_dma_slot = 1;

	sc->sc_ch.dma.start = ata_dbdma_start;
	sc->sc_ch.dma.stop = ata_dbdma_stop;
	sc->sc_ch.dma.load = ata_dbdma_load;
	sc->sc_ch.dma.reset = ata_dbdma_reset;

	/*
	 * DBDMA's field for transfer size is 16 bits. This will overflow
	 * if we try to do a 64K transfer, so stop short of 64K: 126 blocks
	 * of DEV_BSIZE (512) bytes is 64512 bytes, which still fits in the
	 * 16-bit count field.
	 */
	sc->sc_ch.dma.segsize = 126 * DEV_BSIZE;
	ata_dmainit(dev);

	sc->sc_ch.hw.status = ata_dbdma_status;

	mtx_init(&sc->dbdma_mtx, "ATA DBDMA", NULL, MTX_DEF);
}