/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2008 Nathan Whitehorn
 * All rights reserved
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/endian.h>
#include <sys/bus.h>
#include <machine/bus.h>
#include <machine/dbdma.h>
#include <sys/rman.h>

#include "dbdmavar.h"

static MALLOC_DEFINE(M_DBDMA, "dbdma", "DBDMA Command List");

static uint32_t dbdma_read_reg(dbdma_channel_t *, u_int);
static void dbdma_write_reg(dbdma_channel_t *, u_int, uint32_t);
static void dbdma_phys_callback(void *, bus_dma_segment_t *, int, int);

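/*
 * Illustrative use of this API by a DBDMA client driver (a sketch only:
 * the softc fields, the buffer physical address "buf_pa", the slot count,
 * and the DBDMA_OUTPUT_LAST command from <machine/dbdma.h> are assumed,
 * not taken from this file):
 *
 *	dbdma_channel_t *ch;
 *
 *	dbdma_allocate_channel(sc->sc_dbdma_regs, sc->sc_dbdma_offset,
 *	    bus_get_dma_tag(dev), 32, &ch);
 *	dbdma_insert_command(ch, 0, DBDMA_OUTPUT_LAST, 0, buf_pa, 512,
 *	    DBDMA_NEVER, DBDMA_NEVER, DBDMA_NEVER, 0);
 *	dbdma_insert_stop(ch, 1);
 *	dbdma_sync_commands(ch, BUS_DMASYNC_PREWRITE);
 *	dbdma_run(ch);
 */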
static void
dbdma_phys_callback(void *chan, bus_dma_segment_t *segs, int nsegs, int error)
{
	dbdma_channel_t *channel = (dbdma_channel_t *)(chan);

	channel->sc_slots_pa = segs[0].ds_addr;
	dbdma_write_reg(channel, CHAN_CMDPTR, channel->sc_slots_pa);
}

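/*
 * Allocate a channel descriptor and a page of DMA-able memory to hold its
 * command list, then point the channel's command pointer register at the
 * list's physical address (set by dbdma_phys_callback once the map loads).
 */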
int
dbdma_allocate_channel(struct resource *dbdma_regs, u_int offset,
    bus_dma_tag_t parent_dma, int slots, dbdma_channel_t **chan)
{
	int error = 0;
	dbdma_channel_t *channel;

	channel = *chan = malloc(sizeof(struct dbdma_channel), M_DBDMA,
	    M_WAITOK | M_ZERO);

	channel->sc_regs = dbdma_regs;
	channel->sc_off = offset;
	dbdma_stop(channel);

	channel->sc_slots_pa = 0;

	error = bus_dma_tag_create(parent_dma, 16, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, PAGE_SIZE, 1, PAGE_SIZE, 0, NULL,
	    NULL, &(channel->sc_dmatag));

	error = bus_dmamem_alloc(channel->sc_dmatag,
	    (void **)&channel->sc_slots, BUS_DMA_WAITOK | BUS_DMA_ZERO,
	    &channel->sc_dmamap);

	error = bus_dmamap_load(channel->sc_dmatag, channel->sc_dmamap,
	    channel->sc_slots, PAGE_SIZE, dbdma_phys_callback, channel, 0);

	dbdma_write_reg(channel, CHAN_CMDPTR_HI, 0);

	channel->sc_nslots = slots;

	return (error);
}

int
dbdma_resize_channel(dbdma_channel_t *chan, int newslots)
{

	if (newslots > (PAGE_SIZE / sizeof(struct dbdma_command)))
		return (-1);

	chan->sc_nslots = newslots;
	return (0);
}

int
dbdma_free_channel(dbdma_channel_t *chan)
{

	dbdma_stop(chan);

	bus_dmamem_free(chan->sc_dmatag, chan->sc_slots, chan->sc_dmamap);
	bus_dma_tag_destroy(chan->sc_dmatag);

	free(chan, M_DBDMA);

	return (0);
}

uint16_t
dbdma_get_cmd_status(dbdma_channel_t *chan, int slot)
{

	bus_dmamap_sync(chan->sc_dmatag, chan->sc_dmamap, BUS_DMASYNC_POSTREAD);

	/*
	 * I really did mean to swap resCount and xferStatus here, to
	 * account for the quad-word little endian fields.
	 */
	return (le16toh(chan->sc_slots[slot].resCount));
}

void
dbdma_clear_cmd_status(dbdma_channel_t *chan, int slot)
{
	/* See endian note above */
	chan->sc_slots[slot].resCount = 0;
}

uint16_t
dbdma_get_residuals(dbdma_channel_t *chan, int slot)
{

	bus_dmamap_sync(chan->sc_dmatag, chan->sc_dmamap, BUS_DMASYNC_POSTREAD);

	return (le16toh(chan->sc_slots[slot].xferStatus));
}

void
dbdma_reset(dbdma_channel_t *chan)
{

	dbdma_stop(chan);
	dbdma_set_current_cmd(chan, 0);
	dbdma_run(chan);
}

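/*
 * Channel control writes pair a mask (shifted left by DBDMA_REG_MASK_SHIFT)
 * with new bit values: only the status bits selected in the mask are
 * modified by the hardware. dbdma_run(), dbdma_pause(), dbdma_wake() and
 * dbdma_stop() below all follow this convention.
 */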
void
dbdma_run(dbdma_channel_t *chan)
{
	uint32_t control_reg;

	control_reg = DBDMA_STATUS_RUN | DBDMA_STATUS_PAUSE |
	    DBDMA_STATUS_WAKE | DBDMA_STATUS_DEAD;
	control_reg <<= DBDMA_REG_MASK_SHIFT;

	control_reg |= DBDMA_STATUS_RUN;
	dbdma_write_reg(chan, CHAN_CONTROL_REG, control_reg);
}

void
dbdma_pause(dbdma_channel_t *chan)
{
	uint32_t control_reg;

	control_reg = DBDMA_STATUS_PAUSE;
	control_reg <<= DBDMA_REG_MASK_SHIFT;

	control_reg |= DBDMA_STATUS_PAUSE;
	dbdma_write_reg(chan, CHAN_CONTROL_REG, control_reg);
}

void
dbdma_wake(dbdma_channel_t *chan)
{
	uint32_t control_reg;

	control_reg = DBDMA_STATUS_WAKE | DBDMA_STATUS_PAUSE |
	    DBDMA_STATUS_RUN | DBDMA_STATUS_DEAD;
	control_reg <<= DBDMA_REG_MASK_SHIFT;

	control_reg |= DBDMA_STATUS_WAKE | DBDMA_STATUS_RUN;
	dbdma_write_reg(chan, CHAN_CONTROL_REG, control_reg);
}

void
dbdma_stop(dbdma_channel_t *chan)
{
	uint32_t control_reg;

	control_reg = DBDMA_STATUS_RUN;
	control_reg <<= DBDMA_REG_MASK_SHIFT;

	dbdma_write_reg(chan, CHAN_CONTROL_REG, control_reg);

	while (dbdma_read_reg(chan, CHAN_STATUS_REG) & DBDMA_STATUS_ACTIVE)
		DELAY(5);
}

void
dbdma_set_current_cmd(dbdma_channel_t *chan, int slot)
{
	uint32_t cmd;

	cmd = chan->sc_slots_pa + slot * sizeof(struct dbdma_command);
	dbdma_write_reg(chan, CHAN_CMDPTR, cmd);
}

uint16_t
dbdma_get_chan_status(dbdma_channel_t *chan)
{
	uint32_t status_reg;

	status_reg = dbdma_read_reg(chan, CHAN_STATUS_REG);
	return (status_reg & 0x0000ffff);
}

uint8_t
dbdma_get_device_status(dbdma_channel_t *chan)
{
	return (dbdma_get_chan_status(chan) & 0x00ff);
}

void
dbdma_set_device_status(dbdma_channel_t *chan, uint8_t mask, uint8_t value)
{
	uint32_t control_reg;

	control_reg = mask;
	control_reg <<= DBDMA_REG_MASK_SHIFT;
	control_reg |= value;

	dbdma_write_reg(chan, CHAN_CONTROL_REG, control_reg);
}

void
dbdma_set_interrupt_selector(dbdma_channel_t *chan, uint8_t mask, uint8_t val)
{
	uint32_t intr_select;

	intr_select = mask;
	intr_select <<= DBDMA_REG_MASK_SHIFT;

	intr_select |= val;
	dbdma_write_reg(chan, CHAN_INTR_SELECT, intr_select);
}

void
dbdma_set_branch_selector(dbdma_channel_t *chan, uint8_t mask, uint8_t val)
{
	uint32_t br_select;

	br_select = mask;
	br_select <<= DBDMA_REG_MASK_SHIFT;

	br_select |= val;
	dbdma_write_reg(chan, CHAN_BRANCH_SELECT, br_select);
}

void
dbdma_set_wait_selector(dbdma_channel_t *chan, uint8_t mask, uint8_t val)
{
	uint32_t wait_select;

	wait_select = mask;
	wait_select <<= DBDMA_REG_MASK_SHIFT;
	wait_select |= val;
	dbdma_write_reg(chan, CHAN_WAIT_SELECT, wait_select);
}

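/*
 * Build one command descriptor in a slot. For branch-capable commands the
 * cmdDep field holds the physical address of the branch target slot; for
 * DBDMA_STORE_QUAD and DBDMA_LOAD_QUAD it carries the branch_slot argument
 * verbatim. The descriptor is byte-swapped to little-endian before it is
 * written into the command list.
 */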
void
dbdma_insert_command(dbdma_channel_t *chan, int slot, int command, int stream,
    bus_addr_t data, size_t count, uint8_t interrupt, uint8_t branch,
    uint8_t wait, uint32_t branch_slot)
{
	struct dbdma_command cmd;
	uint32_t *flip;

	cmd.cmd = command;
	cmd.key = stream;
	cmd.intr = interrupt;
	cmd.branch = branch;
	cmd.wait = wait;

	cmd.reqCount = count;
	cmd.address = (uint32_t)(data);
	if (command != DBDMA_STORE_QUAD && command != DBDMA_LOAD_QUAD)
		cmd.cmdDep = chan->sc_slots_pa +
		    branch_slot * sizeof(struct dbdma_command);
	else
		cmd.cmdDep = branch_slot;

	cmd.resCount = 0;
	cmd.xferStatus = 0;

	/*
	 * Move quadwords to little-endian. God only knows why
	 * Apple thought this was a good idea.
	 */
	flip = (uint32_t *)(&cmd);
	flip[0] = htole32(flip[0]);
	flip[1] = htole32(flip[1]);
	flip[2] = htole32(flip[2]);

	chan->sc_slots[slot] = cmd;
}

void
dbdma_insert_stop(dbdma_channel_t *chan, int slot)
{

	dbdma_insert_command(chan, slot, DBDMA_STOP, 0, 0, 0, DBDMA_NEVER,
	    DBDMA_NEVER, DBDMA_NEVER, 0);
}

void
dbdma_insert_nop(dbdma_channel_t *chan, int slot)
{

	dbdma_insert_command(chan, slot, DBDMA_NOP, 0, 0, 0, DBDMA_NEVER,
	    DBDMA_NEVER, DBDMA_NEVER, 0);
}

void
dbdma_insert_branch(dbdma_channel_t *chan, int slot, int to_slot)
{

	dbdma_insert_command(chan, slot, DBDMA_NOP, 0, 0, 0, DBDMA_NEVER,
	    DBDMA_ALWAYS, DBDMA_NEVER, to_slot);
}

void
dbdma_sync_commands(dbdma_channel_t *chan, bus_dmasync_op_t op)
{

	bus_dmamap_sync(chan->sc_dmatag, chan->sc_dmamap, op);
}

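/*
 * Save and restore the channel's command pointer and selector registers
 * around a suspend: dbdma_save_state() stops the channel after capturing
 * the registers, dbdma_restore_state() wakes it and writes them back.
 */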
void
dbdma_save_state(dbdma_channel_t *chan)
{

	chan->sc_saved_regs[0] = dbdma_read_reg(chan, CHAN_CMDPTR);
	chan->sc_saved_regs[1] = dbdma_read_reg(chan, CHAN_CMDPTR_HI);
	chan->sc_saved_regs[2] = dbdma_read_reg(chan, CHAN_INTR_SELECT);
	chan->sc_saved_regs[3] = dbdma_read_reg(chan, CHAN_BRANCH_SELECT);
	chan->sc_saved_regs[4] = dbdma_read_reg(chan, CHAN_WAIT_SELECT);

	dbdma_stop(chan);
}

void
dbdma_restore_state(dbdma_channel_t *chan)
{

	dbdma_wake(chan);
	dbdma_write_reg(chan, CHAN_CMDPTR, chan->sc_saved_regs[0]);
	dbdma_write_reg(chan, CHAN_CMDPTR_HI, chan->sc_saved_regs[1]);
	dbdma_write_reg(chan, CHAN_INTR_SELECT, chan->sc_saved_regs[2]);
	dbdma_write_reg(chan, CHAN_BRANCH_SELECT, chan->sc_saved_regs[3]);
	dbdma_write_reg(chan, CHAN_WAIT_SELECT, chan->sc_saved_regs[4]);
}

static uint32_t
dbdma_read_reg(dbdma_channel_t *chan, u_int offset)
{

	return (bus_read_4(chan->sc_regs, chan->sc_off + offset));
}

static void
dbdma_write_reg(dbdma_channel_t *chan, u_int offset, uint32_t val)
{

	bus_write_4(chan->sc_regs, chan->sc_off + offset, val);
}