/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2013 Daisuke Aoyama <aoyama@peach.ne.jp>
 * Copyright (c) 2013 Oleksandr Tymoshenko <gonzo@bluezbox.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/resource.h>
#include <sys/rman.h>

#include <dev/ofw/openfirm.h>
#include <dev/ofw/ofw_bus.h>
#include <dev/ofw/ofw_bus_subr.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <machine/bus.h>

#include "bcm2835_dma.h"
#include "bcm2835_vcbus.h"

#define	MAX_REG			9

/* private flags */
#define	BCM_DMA_CH_USED		0x00000001
#define	BCM_DMA_CH_FREE		0x40000000
#define	BCM_DMA_CH_UNMAP	0x80000000

/* Register Map (4.2.1.2) */
#define	BCM_DMA_CS(n)		(0x100*(n) + 0x00)
#define		CS_ACTIVE	(1 << 0)
#define		CS_END		(1 << 1)
#define		CS_INT		(1 << 2)
#define		CS_DREQ		(1 << 3)
#define		CS_ISPAUSED	(1 << 4)
#define		CS_ISHELD	(1 << 5)
#define		CS_ISWAIT	(1 << 6)
#define		CS_ERR		(1 << 8)
#define		CS_WAITWRT	(1 << 28)
#define		CS_DISDBG	(1 << 29)
#define		CS_ABORT	(1 << 30)
#define		CS_RESET	(1U << 31)
#define	BCM_DMA_CBADDR(n)	(0x100*(n) + 0x04)
#define	BCM_DMA_INFO(n)		(0x100*(n) + 0x08)
#define		INFO_INT_EN	(1 << 0)
#define		INFO_TDMODE	(1 << 1)
#define		INFO_WAIT_RESP	(1 << 3)
#define		INFO_D_INC	(1 << 4)
#define		INFO_D_WIDTH	(1 << 5)
#define		INFO_D_DREQ	(1 << 6)
#define		INFO_S_INC	(1 << 8)
#define		INFO_S_WIDTH	(1 << 9)
#define		INFO_S_DREQ	(1 << 10)
#define		INFO_WAITS_SHIFT	(21)
#define		INFO_PERMAP_SHIFT	(16)
#define		INFO_PERMAP_MASK	(0x1f << INFO_PERMAP_SHIFT)

#define	BCM_DMA_SRC(n)		(0x100*(n) + 0x0C)
#define	BCM_DMA_DST(n)		(0x100*(n) + 0x10)
#define	BCM_DMA_LEN(n)		(0x100*(n) + 0x14)
#define	BCM_DMA_STRIDE(n)	(0x100*(n) + 0x18)
#define	BCM_DMA_CBNEXT(n)	(0x100*(n) + 0x1C)
#define	BCM_DMA_DEBUG(n)	(0x100*(n) + 0x20)
#define		DEBUG_ERROR_MASK	(7)

#define	BCM_DMA_INT_STATUS	0xfe0
#define	BCM_DMA_ENABLE		0xff0

/* relative offset from BCM_VC_DMA0_BASE (p.39) */
#define	BCM_DMA_CH(n)		(0x100*(n))

/* channels used by GPU */
#define	BCM_DMA_CH_BULK		0
#define	BCM_DMA_CH_FAST1	2
#define	BCM_DMA_CH_FAST2	3

#define	BCM_DMA_CH_GPU_MASK	((1 << BCM_DMA_CH_BULK) |	\
				 (1 << BCM_DMA_CH_FAST1) |	\
				 (1 << BCM_DMA_CH_FAST2))

/* DMA Control Block - 256bit aligned (p.40) */
struct bcm_dma_cb {
	uint32_t info;		/* Transfer Information */
	uint32_t src;		/* Source Address */
	uint32_t dst;		/* Destination Address */
	uint32_t len;		/* Transfer Length */
	uint32_t stride;	/* 2D Mode Stride */
	uint32_t next;		/* Next Control Block Address */
	uint32_t rsvd1;		/* Reserved */
	uint32_t rsvd2;		/* Reserved */
};
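
/*
 * Illustrative check, not in the original sources: the eight 32-bit words
 * above total exactly 32 bytes, which matches the 256-bit control block
 * alignment required by the controller and verified in bcm_dma_init().
 */
CTASSERT(sizeof(struct bcm_dma_cb) == 32);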

#ifdef DEBUG
static void bcm_dma_cb_dump(struct bcm_dma_cb *cb);
static void bcm_dma_reg_dump(int ch);
#endif

/* DMA channel private info */
struct bcm_dma_ch {
	int			ch;
	uint32_t		flags;
	struct bcm_dma_cb *	cb;
	uint32_t		vc_cb;
	bus_dmamap_t		dma_map;
	void			(*intr_func)(int, void *);
	void *			intr_arg;
};

struct bcm_dma_softc {
	device_t		sc_dev;
	struct mtx		sc_mtx;
	struct resource *	sc_mem;
	struct resource *	sc_irq[BCM_DMA_CH_MAX];
	void *			sc_intrhand[BCM_DMA_CH_MAX];
	struct bcm_dma_ch	sc_dma_ch[BCM_DMA_CH_MAX];
	bus_dma_tag_t		sc_dma_tag;
};

static struct bcm_dma_softc *bcm_dma_sc = NULL;
static uint32_t bcm_dma_channel_mask;

static struct ofw_compat_data compat_data[] = {
	{"broadcom,bcm2835-dma",	1},
	{"brcm,bcm2835-dma",		1},
	{NULL,				0}
};

static void
bcm_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int err)
{
	bus_addr_t *addr;

	if (err)
		return;

	addr = (bus_addr_t*)arg;
	*addr = ARMC_TO_VCBUS(segs[0].ds_addr);
}

static void
bcm_dma_reset(device_t dev, int ch)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	struct bcm_dma_cb *cb;
	uint32_t cs;
	int count;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));

	if (cs & CS_ACTIVE) {
		/* pause current task */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), 0);

		count = 1000;
		do {
			cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch));
		} while (!(cs & CS_ISPAUSED) && (count-- > 0));

		if (!(cs & CS_ISPAUSED)) {
			device_printf(dev,
			    "Can't abort DMA transfer at channel %d\n", ch);
		}

		bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

		/* Complete everything, clear interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch),
		    CS_ABORT | CS_INT | CS_END | CS_ACTIVE);
	}

	/* clear control blocks */
	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch), 0);
	bus_write_4(sc->sc_mem, BCM_DMA_CBNEXT(ch), 0);

	/* Reset control block */
	cb = sc->sc_dma_ch[ch].cb;
	bzero(cb, sizeof(*cb));
	cb->info = INFO_WAIT_RESP;
}

static int
bcm_dma_init(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	uint32_t reg;
	struct bcm_dma_ch *ch;
	void *cb_virt;
	vm_paddr_t cb_phys;
	int err;
	int i;

	/*
	 * Only channels set in bcm_dma_channel_mask can be controlled by us.
	 * The others are out of our control, as are the corresponding bits
	 * in both the BCM_DMA_ENABLE and BCM_DMA_INT_STATUS global registers.
	 * As these registers are read-write, there is no safe way to update
	 * only the bits we control.
	 *
	 * Fortunately, after reset all channels are enabled in the
	 * BCM_DMA_ENABLE register and all statuses are cleared in
	 * BCM_DMA_INT_STATUS.  Not touching these registers is a trade-off
	 * between a correct initialization that assumes nothing and not
	 * messing up something we have no control over.
	 */
	reg = bus_read_4(sc->sc_mem, BCM_DMA_ENABLE);
	if ((reg & bcm_dma_channel_mask) != bcm_dma_channel_mask)
		device_printf(dev, "channels are not enabled\n");
	reg = bus_read_4(sc->sc_mem, BCM_DMA_INT_STATUS);
	if ((reg & bcm_dma_channel_mask) != 0)
		device_printf(dev, "statuses are not cleared\n");

	/*
	 * Allocate the control blocks for the DMA channels.  Per p.40 of the
	 * peripheral spec, a control block must be 256-bit (32-byte) aligned.
	 * The DMA controller has a full 32-bit register dedicated to the
	 * control block address, so we do not need to bother with the
	 * per-SoC peripheral bus address restrictions.
	 */
	err = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct bcm_dma_cb), 1,
	    sizeof(struct bcm_dma_cb),
	    BUS_DMA_ALLOCNOW, NULL, NULL,
	    &sc->sc_dma_tag);

	if (err) {
		device_printf(dev, "failed to allocate DMA tag\n");
		return (err);
	}

	/* setup initial settings */
	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		ch = &sc->sc_dma_ch[i];

		bzero(ch, sizeof(struct bcm_dma_ch));
		ch->ch = i;
		ch->flags = BCM_DMA_CH_UNMAP;

		if ((bcm_dma_channel_mask & (1 << i)) == 0)
			continue;

		err = bus_dmamem_alloc(sc->sc_dma_tag, &cb_virt,
		    BUS_DMA_WAITOK | BUS_DMA_COHERENT | BUS_DMA_ZERO,
		    &ch->dma_map);
		if (err) {
			device_printf(dev, "cannot allocate DMA memory\n");
			break;
		}

		/*
		 * The least alignment for busdma-allocated memory is the
		 * cache line size, so just make sure nothing stupid happened
		 * and we got a properly aligned address.
		 */
		if ((uintptr_t)cb_virt & 0x1f) {
			device_printf(dev,
			    "DMA address is not 32-bytes aligned: %p\n",
			    (void*)cb_virt);
			break;
		}

		err = bus_dmamap_load(sc->sc_dma_tag, ch->dma_map, cb_virt,
		    sizeof(struct bcm_dma_cb), bcm_dmamap_cb, &cb_phys,
		    BUS_DMA_WAITOK);
		if (err) {
			device_printf(dev, "cannot load DMA memory\n");
			break;
		}

		ch->cb = cb_virt;
		ch->vc_cb = cb_phys;
		ch->flags = BCM_DMA_CH_FREE;
		ch->cb->info = INFO_WAIT_RESP;

		/* reset DMA engine */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(i), CS_RESET);
	}

	return (0);
}

/*
 * Allocate DMA channel for further use, returns channel # or
 *     BCM_DMA_CH_INVALID
 */
int
bcm_dma_allocate(int req_ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int ch = BCM_DMA_CH_INVALID;
	int i;

	if (sc == NULL)
		return (BCM_DMA_CH_INVALID);

	if (req_ch >= BCM_DMA_CH_MAX)
		return (BCM_DMA_CH_INVALID);

	/* Auto(req_ch < 0) or CH specified */
	mtx_lock(&sc->sc_mtx);

	if (req_ch < 0) {
		for (i = 0; i < BCM_DMA_CH_MAX; i++) {
			if (sc->sc_dma_ch[i].flags & BCM_DMA_CH_FREE) {
				ch = i;
				sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
				sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
				break;
			}
		}
	} else if (sc->sc_dma_ch[req_ch].flags & BCM_DMA_CH_FREE) {
		ch = req_ch;
		sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_FREE;
		sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_USED;
	}

	mtx_unlock(&sc->sc_mtx);
	return (ch);
}

/*
 * Frees allocated channel. Returns 0 on success, -1 otherwise
 */
int
bcm_dma_free(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;

	if (sc == NULL)
		return (-1);

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	mtx_lock(&sc->sc_mtx);
	if (sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED) {
		sc->sc_dma_ch[ch].flags |= BCM_DMA_CH_FREE;
		sc->sc_dma_ch[ch].flags &= ~BCM_DMA_CH_USED;
		sc->sc_dma_ch[ch].intr_func = NULL;
		sc->sc_dma_ch[ch].intr_arg = NULL;

		/* reset DMA engine */
		bcm_dma_reset(sc->sc_dev, ch);
	}

	mtx_unlock(&sc->sc_mtx);
	return (0);
}

/*
 * Assign handler function for channel interrupt
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_intr(int ch, void (*func)(int, void *), void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (sc == NULL)
		return (-1);

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	sc->sc_dma_ch[ch].intr_func = func;
	sc->sc_dma_ch[ch].intr_arg = arg;
	cb = sc->sc_dma_ch[ch].cb;
	cb->info |= INFO_INT_EN;

	return (0);
}

/*
 * Setup DMA source parameters
 *     ch - channel number
 *     dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if
 *         source is physical memory
 *     inc_addr - BCM_DMA_INC_ADDR if source address
 *         should be increased after each access or
 *         BCM_DMA_SAME_ADDR if address should remain
 *         the same
 *     width - size of read operation, BCM_DMA_32BIT
 *         for 32bit bursts, BCM_DMA_128BIT for 128 bits
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_src(int ch, int dreq, int inc_addr, int width)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	uint32_t info;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	info = sc->sc_dma_ch[ch].cb->info;
	info &= ~INFO_PERMAP_MASK;
	info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK;

	if (dreq)
		info |= INFO_S_DREQ;
	else
		info &= ~INFO_S_DREQ;

	if (width == BCM_DMA_128BIT)
		info |= INFO_S_WIDTH;
	else
		info &= ~INFO_S_WIDTH;

	if (inc_addr == BCM_DMA_INC_ADDR)
		info |= INFO_S_INC;
	else
		info &= ~INFO_S_INC;

	sc->sc_dma_ch[ch].cb->info = info;

	return (0);
}

/*
 * Setup DMA destination parameters
 *     ch - channel number
 *     dreq - hardware DREQ # or BCM_DMA_DREQ_NONE if
 *         destination is physical memory
 *     inc_addr - BCM_DMA_INC_ADDR if destination address
 *         should be increased after each access or
 *         BCM_DMA_SAME_ADDR if address should remain
 *         the same
 *     width - size of write operation, BCM_DMA_32BIT
 *         for 32bit bursts, BCM_DMA_128BIT for 128 bits
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_setup_dst(int ch, int dreq, int inc_addr, int width)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	uint32_t info;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	info = sc->sc_dma_ch[ch].cb->info;
	info &= ~INFO_PERMAP_MASK;
	info |= (dreq << INFO_PERMAP_SHIFT) & INFO_PERMAP_MASK;

	if (dreq)
		info |= INFO_D_DREQ;
	else
		info &= ~INFO_D_DREQ;

	if (width == BCM_DMA_128BIT)
		info |= INFO_D_WIDTH;
	else
		info &= ~INFO_D_WIDTH;

	if (inc_addr == BCM_DMA_INC_ADDR)
		info |= INFO_D_INC;
	else
		info &= ~INFO_D_INC;

	sc->sc_dma_ch[ch].cb->info = info;

	return (0);
}
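
/*
 * Illustrative sketch, not from the original sources: a memory-to-peripheral
 * transfer paced by a hypothetical DREQ line "dreq_num" would pair the two
 * setup calls roughly as follows:
 *
 *	bcm_dma_setup_src(ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR,
 *	    BCM_DMA_32BIT);
 *	bcm_dma_setup_dst(ch, dreq_num, BCM_DMA_SAME_ADDR, BCM_DMA_32BIT);
 *
 * This sets INFO_S_INC for the incrementing memory side and INFO_D_DREQ plus
 * the PERMAP field for the peripheral side.
 */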

#ifdef DEBUG
void
bcm_dma_cb_dump(struct bcm_dma_cb *cb)
{

	printf("DMA CB ");
	printf("INFO: %8.8x ", cb->info);
	printf("SRC: %8.8x ", cb->src);
	printf("DST: %8.8x ", cb->dst);
	printf("LEN: %8.8x ", cb->len);
	printf("\n");
	printf("STRIDE: %8.8x ", cb->stride);
	printf("NEXT: %8.8x ", cb->next);
	printf("RSVD1: %8.8x ", cb->rsvd1);
	printf("RSVD2: %8.8x ", cb->rsvd2);
	printf("\n");
}

void
bcm_dma_reg_dump(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	int i;
	uint32_t reg;

	if (sc == NULL)
		return;

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return;

	printf("DMA%d: ", ch);
	for (i = 0; i < MAX_REG; i++) {
		reg = bus_read_4(sc->sc_mem, BCM_DMA_CH(ch) + i*4);
		printf("%8.8x ", reg);
	}
	printf("\n");
}
#endif

/*
 * Start DMA transaction
 *     ch - channel number
 *     src, dst - source and destination address in
 *         ARM physical memory address space.
 *     len - number of bytes to be transferred
 *
 * Returns 0 on success, -1 otherwise
 */
int
bcm_dma_start(int ch, vm_paddr_t src, vm_paddr_t dst, int len)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (sc == NULL)
		return (-1);

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (-1);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (-1);

	cb = sc->sc_dma_ch[ch].cb;
	cb->src = ARMC_TO_VCBUS(src);
	cb->dst = ARMC_TO_VCBUS(dst);

	cb->len = len;

	bus_dmamap_sync(sc->sc_dma_tag,
	    sc->sc_dma_ch[ch].dma_map, BUS_DMASYNC_PREWRITE);

	bus_write_4(sc->sc_mem, BCM_DMA_CBADDR(ch),
	    sc->sc_dma_ch[ch].vc_cb);
	bus_write_4(sc->sc_mem, BCM_DMA_CS(ch), CS_ACTIVE);

#ifdef DEBUG
	bcm_dma_cb_dump(sc->sc_dma_ch[ch].cb);
	bcm_dma_reg_dump(ch);
#endif

	return (0);
}
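
/*
 * Illustrative usage sketch, not part of the original driver: a client
 * driver would typically allocate a channel once and then start transfers
 * as needed (my_done_cb, my_softc, src_phys, dst_phys and len are
 * hypothetical names):
 *
 *	ch = bcm_dma_allocate(-1);	a negative request picks any free channel
 *	if (ch == BCM_DMA_CH_INVALID)
 *		handle the error;
 *	bcm_dma_setup_intr(ch, my_done_cb, my_softc);
 *	bcm_dma_setup_src(ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR,
 *	    BCM_DMA_32BIT);
 *	bcm_dma_setup_dst(ch, BCM_DMA_DREQ_NONE, BCM_DMA_INC_ADDR,
 *	    BCM_DMA_32BIT);
 *	bcm_dma_start(ch, src_phys, dst_phys, len);
 *
 * The callback registered with bcm_dma_setup_intr() is invoked from
 * bcm_dma_intr() once the transfer raises CS_INT, and the channel is
 * returned with bcm_dma_free() when no longer needed.
 */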

/*
 * Get length requested for DMA transaction
 *     ch - channel number
 *
 * Returns size of transaction, 0 if channel is invalid
 */
uint32_t
bcm_dma_length(int ch)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_cb *cb;

	if (sc == NULL)
		return (0);

	if (ch < 0 || ch >= BCM_DMA_CH_MAX)
		return (0);

	if (!(sc->sc_dma_ch[ch].flags & BCM_DMA_CH_USED))
		return (0);

	cb = sc->sc_dma_ch[ch].cb;

	return (cb->len);
}

static void
bcm_dma_intr(void *arg)
{
	struct bcm_dma_softc *sc = bcm_dma_sc;
	struct bcm_dma_ch *ch = (struct bcm_dma_ch *)arg;
	uint32_t cs, debug;

	/* my interrupt? */
	cs = bus_read_4(sc->sc_mem, BCM_DMA_CS(ch->ch));

	/*
	 * Is it an active channel?  Our diagnostics could be better here, but
	 * it's not necessarily an easy task to resolve a rid/resource to an
	 * actual irq number.  We'd want to do this to set a flag indicating
	 * whether the irq is shared or not, so we know to complain.
	 */
	if (!(ch->flags & BCM_DMA_CH_USED))
		return;

	/* Again, we can't complain here.  The same logic applies. */
	if (!(cs & (CS_INT | CS_ERR)))
		return;

	if (cs & CS_ERR) {
		debug = bus_read_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch));
		device_printf(sc->sc_dev, "DMA error %d on CH%d\n",
		    debug & DEBUG_ERROR_MASK, ch->ch);
		bus_write_4(sc->sc_mem, BCM_DMA_DEBUG(ch->ch),
		    debug & DEBUG_ERROR_MASK);
		bcm_dma_reset(sc->sc_dev, ch->ch);
	}

	if (cs & CS_INT) {
		/* acknowledge interrupt */
		bus_write_4(sc->sc_mem, BCM_DMA_CS(ch->ch),
		    CS_INT | CS_END);

		/* Prepare for possible access to len field */
		bus_dmamap_sync(sc->sc_dma_tag, ch->dma_map,
		    BUS_DMASYNC_POSTWRITE);

		/* run the registered callback, if any */
		if (ch->intr_func)
			ch->intr_func(ch->ch, ch->intr_arg);
	}
}

static int
bcm_dma_probe(device_t dev)
{

	if (!ofw_bus_status_okay(dev))
		return (ENXIO);

	if (ofw_bus_search_compatible(dev, compat_data)->ocd_data == 0)
		return (ENXIO);

	device_set_desc(dev, "BCM2835 DMA Controller");
	return (BUS_PROBE_DEFAULT);
}

static int
bcm_dma_attach(device_t dev)
{
	struct bcm_dma_softc *sc = device_get_softc(dev);
	phandle_t node;
	int rid, err = 0;
	int i;

	sc->sc_dev = dev;

	if (bcm_dma_sc)
		return (ENXIO);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		sc->sc_irq[i] = NULL;
		sc->sc_intrhand[i] = NULL;
	}

	/* Get DMA channel mask. */
	node = ofw_bus_get_node(sc->sc_dev);
	if (OF_getencprop(node, "brcm,dma-channel-mask", &bcm_dma_channel_mask,
	    sizeof(bcm_dma_channel_mask)) == -1 &&
	    OF_getencprop(node, "broadcom,channels", &bcm_dma_channel_mask,
	    sizeof(bcm_dma_channel_mask)) == -1) {
		device_printf(dev, "could not get channel mask property\n");
		return (ENXIO);
	}

	/* Mask out channels used by GPU. */
	bcm_dma_channel_mask &= ~BCM_DMA_CH_GPU_MASK;

	/* DMA0 - DMA14 */
	rid = 0;
	sc->sc_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->sc_mem == NULL) {
		device_printf(dev, "could not allocate memory resource\n");
		return (ENXIO);
	}

	/* IRQ DMA0 - DMA11 XXX NOT USE DMA12(spurious?) */
	for (rid = 0; rid < BCM_DMA_CH_MAX; rid++) {
		if ((bcm_dma_channel_mask & (1 << rid)) == 0)
			continue;

		sc->sc_irq[rid] = bus_alloc_resource_any(dev, SYS_RES_IRQ,
		    &rid, RF_ACTIVE | RF_SHAREABLE);
		if (sc->sc_irq[rid] == NULL) {
			device_printf(dev, "cannot allocate interrupt\n");
			err = ENXIO;
			goto fail;
		}
		if (bus_setup_intr(dev, sc->sc_irq[rid],
		    INTR_TYPE_MISC | INTR_MPSAFE,
		    NULL, bcm_dma_intr, &sc->sc_dma_ch[rid],
		    &sc->sc_intrhand[rid])) {
			device_printf(dev, "cannot setup interrupt handler\n");
			err = ENXIO;
			goto fail;
		}
	}

	mtx_init(&sc->sc_mtx, "bcmdma", "bcmdma", MTX_DEF);
	bcm_dma_sc = sc;

	err = bcm_dma_init(dev);
	if (err)
		goto fail;

	return (err);

fail:
	if (sc->sc_mem)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->sc_mem);

	for (i = 0; i < BCM_DMA_CH_MAX; i++) {
		if (sc->sc_intrhand[i])
			bus_teardown_intr(dev, sc->sc_irq[i],
			    sc->sc_intrhand[i]);
		if (sc->sc_irq[i])
			bus_release_resource(dev, SYS_RES_IRQ, 0,
			    sc->sc_irq[i]);
	}

	return (err);
}

static device_method_t bcm_dma_methods[] = {
	DEVMETHOD(device_probe,		bcm_dma_probe),
	DEVMETHOD(device_attach,	bcm_dma_attach),
	{ 0, 0 }
};

static driver_t bcm_dma_driver = {
	"bcm_dma",
	bcm_dma_methods,
	sizeof(struct bcm_dma_softc),
};

EARLY_DRIVER_MODULE(bcm_dma, simplebus, bcm_dma_driver, 0, 0,
    BUS_PASS_SUPPORTDEV + BUS_PASS_ORDER_MIDDLE);
MODULE_VERSION(bcm_dma, 1);