// SPDX-License-Identifier: GPL-2.0
/*
 * TUSB6010 USB 2.0 OTG Dual Role controller OMAP DMA interface
 *
 * Copyright (C) 2006 Nokia Corporation
 * Tony Lindgren <tony@atomide.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/usb.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>

#include "musb_core.h"
#include "tusb6010.h"

#define to_chdat(c)		((struct tusb_omap_dma_ch *)(c)->private_data)

#define MAX_DMAREQ		5	/* REVISIT: Really 6, but req5 not OK */

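/* State for one TUSB dmareq line and its dmaengine channel */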
struct tusb_dma_data {
	s8			dmareq;
	struct dma_chan		*chan;
};

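/* Per-DMA-channel state for the endpoint transfer currently using it */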
struct tusb_omap_dma_ch {
	struct musb		*musb;
	void __iomem		*tbase;
	unsigned long		phys_offset;
	int			epnum;
	u8			tx;
	struct musb_hw_ep	*hw_ep;

	struct tusb_dma_data	*dma_data;

	struct tusb_omap_dma	*tusb_dma;

	dma_addr_t		dma_addr;

	u32			len;
	u16			packet_sz;
	u16			transfer_packet_sz;
	u32			transfer_len;
	u32			completed_len;
};

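/* Controller-wide state: dma_controller glue plus the pool of dmareq channels */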
struct tusb_omap_dma {
	struct dma_controller		controller;
	void __iomem			*tbase;

	struct tusb_dma_data		dma_pool[MAX_DMAREQ];
	unsigned			multichannel:1;
};

/*
 * Allocate dmareq0 to the current channel unless it's already taken
 */
static inline int tusb_omap_use_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32		reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if (reg != 0) {
		dev_dbg(chdat->musb->controller, "ep%i dmareq0 is busy for ep%i\n",
			chdat->epnum, reg & 0xf);
		return -EAGAIN;
	}

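	/* Map dmareq0: bits [3:0] hold the endpoint number, bit 4 the tx direction */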
	if (chdat->tx)
		reg = (1 << 4) | chdat->epnum;
	else
		reg = chdat->epnum;

	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	return 0;
}

static inline void tusb_omap_free_shared_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32		reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);

	if ((reg & 0xf) != chdat->epnum) {
		printk(KERN_ERR "ep%i trying to release dmareq0 for ep%i\n",
			chdat->epnum, reg & 0xf);
		return;
	}
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, 0);
}

/*
 * See also musb_dma_completion in plat_uds.c and musb_g_[tx|rx]() in
 * musb_gadget.c.
 */
static void tusb_omap_dma_cb(void *data)
{
	struct dma_channel	*channel = (struct dma_channel *)data;
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
	struct tusb_omap_dma	*tusb_dma = chdat->tusb_dma;
	struct musb		*musb = chdat->musb;
	struct device		*dev = musb->controller;
	struct musb_hw_ep	*hw_ep = chdat->hw_ep;
	void __iomem		*ep_conf = hw_ep->conf;
	void __iomem		*mbase = musb->mregs;
	unsigned long		remaining, flags, pio;

	spin_lock_irqsave(&musb->lock, flags);

	dev_dbg(musb->controller, "ep%i %s dma callback\n",
		chdat->epnum, chdat->tx ? "tx" : "rx");

	if (chdat->tx)
		remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	remaining = TUSB_EP_CONFIG_XFR_SIZE(remaining);

	/* HW issue #10: XFR_SIZE may get corrupt on DMA (both async & sync) */
	if (unlikely(remaining > chdat->transfer_len)) {
		dev_dbg(musb->controller, "Corrupt %s XFR_SIZE: 0x%08lx\n",
			chdat->tx ? "tx" : "rx", remaining);
		remaining = 0;
	}

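	/* XFR_SIZE holds what is still left to transfer; the difference is what DMA moved */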
	channel->actual_len = chdat->transfer_len - remaining;
	pio = chdat->len - channel->actual_len;

	dev_dbg(musb->controller, "DMA remaining %lu/%u\n", remaining, chdat->transfer_len);

	/* Transfer remaining 1 - 31 bytes */
	if (pio > 0 && pio < 32) {
		u8	*buf;

		dev_dbg(musb->controller, "Using PIO for remaining %lu bytes\n", pio);
		buf = phys_to_virt((u32)chdat->dma_addr) + chdat->transfer_len;
		if (chdat->tx) {
			dma_unmap_single(dev, chdat->dma_addr,
						chdat->transfer_len,
						DMA_TO_DEVICE);
			musb_write_fifo(hw_ep, pio, buf);
		} else {
			dma_unmap_single(dev, chdat->dma_addr,
						chdat->transfer_len,
						DMA_FROM_DEVICE);
			musb_read_fifo(hw_ep, pio, buf);
		}
		channel->actual_len += pio;
	}

	if (!tusb_dma->multichannel)
		tusb_omap_free_shared_dmareq(chdat);

	channel->status = MUSB_DMA_STATUS_FREE;

	musb_dma_completion(musb, chdat->epnum, chdat->tx);

	/* We must terminate short tx transfers manually by setting TXPKTRDY.
	 * REVISIT: This same problem may occur with other MUSB dma as well.
	 * Easy to test with g_ether by pinging the MUSB board with ping -s54.
	 */
	if ((chdat->transfer_len < chdat->packet_sz)
			|| (chdat->transfer_len % chdat->packet_sz != 0)) {
		u16	csr;

		if (chdat->tx) {
			dev_dbg(musb->controller, "terminating short tx packet\n");
			musb_ep_select(mbase, chdat->epnum);
			csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
			csr |= MUSB_TXCSR_MODE | MUSB_TXCSR_TXPKTRDY
				| MUSB_TXCSR_P_WZC_BITS;
			musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
		}
	}

	spin_unlock_irqrestore(&musb->lock, flags);
}

static int tusb_omap_dma_program(struct dma_channel *channel, u16 packet_sz,
				u8 rndis_mode, dma_addr_t dma_addr, u32 len)
{
	struct tusb_omap_dma_ch		*chdat = to_chdat(channel);
	struct tusb_omap_dma		*tusb_dma = chdat->tusb_dma;
	struct musb			*musb = chdat->musb;
	struct device			*dev = musb->controller;
	struct musb_hw_ep		*hw_ep = chdat->hw_ep;
	void __iomem			*mbase = musb->mregs;
	void __iomem			*ep_conf = hw_ep->conf;
	dma_addr_t			fifo_addr = hw_ep->fifo_sync;
	u32				dma_remaining;
	u16				csr;
	u32				psize;
	struct tusb_dma_data		*dma_data;
	struct dma_async_tx_descriptor	*dma_desc;
	struct dma_slave_config		dma_cfg;
	enum dma_transfer_direction	dma_dir;
	u32				port_window;
	int				ret;

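	/* DMA is only used for 16-bit aligned buffers of at least 32 bytes that fit in one packet */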
	if (unlikely(dma_addr & 0x1) || (len < 32) || (len > packet_sz))
		return false;

	/*
	 * HW issue #10: Async dma will eventually corrupt the XFR_SIZE
	 * register which will cause missed DMA interrupt. We could try to
	 * use a timer for the callback, but it is unsafe as the XFR_SIZE
	 * register is corrupt, and we won't know if the DMA worked.
	 */
	if (dma_addr & 0x2)
		return false;

	/*
	 * Because of HW issue #10, it seems like mixing sync DMA and async
	 * PIO access can confuse the DMA. Make sure XFR_SIZE is reset before
	 * using the channel for DMA.
	 */
	if (chdat->tx)
		dma_remaining = musb_readl(ep_conf, TUSB_EP_TX_OFFSET);
	else
		dma_remaining = musb_readl(ep_conf, TUSB_EP_RX_OFFSET);

	dma_remaining = TUSB_EP_CONFIG_XFR_SIZE(dma_remaining);
	if (dma_remaining) {
		dev_dbg(musb->controller, "Busy %s dma, not using: %08x\n",
			chdat->tx ? "tx" : "rx", dma_remaining);
		return false;
	}

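	/* DMA only the 32-byte aligned part; the 1-31 byte tail is done with PIO in tusb_omap_dma_cb() */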
	chdat->transfer_len = len & ~0x1f;

	if (len < packet_sz)
		chdat->transfer_packet_sz = chdat->transfer_len;
	else
		chdat->transfer_packet_sz = packet_sz;

	dma_data = chdat->dma_data;
	if (!tusb_dma->multichannel) {
		if (tusb_omap_use_shared_dmareq(chdat) != 0) {
			dev_dbg(musb->controller, "could not get dma for ep%i\n", chdat->epnum);
			return false;
		}
		if (dma_data->dmareq < 0) {
			/* REVISIT: This should get blocked earlier, happens
			 * with MSC ErrorRecoveryTest
			 */
			WARN_ON(1);
			return false;
		}
	}

	chdat->packet_sz = packet_sz;
	chdat->len = len;
	channel->actual_len = 0;
	chdat->dma_addr = dma_addr;
	channel->status = MUSB_DMA_STATUS_BUSY;

	/* Since we're recycling dma areas, we need to clean or invalidate */
	if (chdat->tx) {
		dma_dir = DMA_MEM_TO_DEV;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_TO_DEVICE);
	} else {
		dma_dir = DMA_DEV_TO_MEM;
		dma_map_single(dev, phys_to_virt(dma_addr), len,
				DMA_FROM_DEVICE);
	}

	memset(&dma_cfg, 0, sizeof(dma_cfg));

	/* Use 16-bit transfer if dma_addr is not 32-bit aligned */
	if ((dma_addr & 0x3) == 0) {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
		port_window = 8;
	} else {
		dma_cfg.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		dma_cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
		port_window = 16;

		fifo_addr = hw_ep->fifo_async;
	}

	dev_dbg(musb->controller,
		"ep%i %s dma: %pad len: %u(%u) packet_sz: %i(%i)\n",
		chdat->epnum, chdat->tx ? "tx" : "rx", &dma_addr,
		chdat->transfer_len, len, chdat->transfer_packet_sz, packet_sz);

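	/* The FIFO address serves as source or destination depending on dma_dir */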
	dma_cfg.src_addr = fifo_addr;
	dma_cfg.dst_addr = fifo_addr;
	dma_cfg.src_port_window_size = port_window;
	dma_cfg.src_maxburst = port_window;
	dma_cfg.dst_port_window_size = port_window;
	dma_cfg.dst_maxburst = port_window;

	ret = dmaengine_slave_config(dma_data->chan, &dma_cfg);
	if (ret) {
		dev_err(musb->controller, "DMA slave config failed: %d\n", ret);
		return false;
	}

	dma_desc = dmaengine_prep_slave_single(dma_data->chan, dma_addr,
					chdat->transfer_len, dma_dir,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!dma_desc) {
		dev_err(musb->controller, "DMA prep_slave_single failed\n");
		return false;
	}

	dma_desc->callback = tusb_omap_dma_cb;
	dma_desc->callback_param = channel;
	dmaengine_submit(dma_desc);

	dev_dbg(musb->controller,
		"ep%i %s using %i-bit %s dma from %pad to %pad\n",
		chdat->epnum, chdat->tx ? "tx" : "rx",
		dma_cfg.src_addr_width * 8,
		((dma_addr & 0x3) == 0) ? "sync" : "async",
		(dma_dir == DMA_MEM_TO_DEV) ? &dma_addr : &fifo_addr,
		(dma_dir == DMA_MEM_TO_DEV) ? &fifo_addr : &dma_addr);

	/*
	 * Prepare MUSB for DMA transfer
	 */
	musb_ep_select(mbase, chdat->epnum);
	if (chdat->tx) {
		csr = musb_readw(hw_ep->regs, MUSB_TXCSR);
		csr |= (MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAENAB
			| MUSB_TXCSR_DMAMODE | MUSB_TXCSR_MODE);
		csr &= ~MUSB_TXCSR_P_UNDERRUN;
		musb_writew(hw_ep->regs, MUSB_TXCSR, csr);
	} else {
		csr = musb_readw(hw_ep->regs, MUSB_RXCSR);
		csr |= MUSB_RXCSR_DMAENAB;
		csr &= ~(MUSB_RXCSR_AUTOCLEAR | MUSB_RXCSR_DMAMODE);
		musb_writew(hw_ep->regs, MUSB_RXCSR,
			csr | MUSB_RXCSR_P_WZC_BITS);
	}

	/* Start DMA transfer */
	dma_async_issue_pending(dma_data->chan);

	if (chdat->tx) {
		/* Send transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~0x7ff;
		psize |= chdat->transfer_packet_sz;
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_TX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	} else {
		/* Receive transfer_packet_sz packets at a time */
		psize = musb_readl(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET);
		psize &= ~(0x7ff << 16);
		psize |= (chdat->transfer_packet_sz << 16);
		musb_writel(ep_conf, TUSB_EP_MAX_PACKET_SIZE_OFFSET, psize);

		musb_writel(ep_conf, TUSB_EP_RX_OFFSET,
			TUSB_EP_CONFIG_XFR_SIZE(chdat->transfer_len));
	}

	return true;
}

static int tusb_omap_dma_abort(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);

	if (chdat->dma_data)
		dmaengine_terminate_all(chdat->dma_data->chan);

	channel->status = MUSB_DMA_STATUS_FREE;

	return 0;
}

static inline int tusb_omap_dma_allocate_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32		reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	int		i, dmareq_nr = -1;

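	/* TUSB_DMA_EP_MAP packs a 5-bit field per dmareq: bits [3:0] endpoint, bit 4 tx */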
	for (i = 0; i < MAX_DMAREQ; i++) {
		int cur = (reg & (0xf << (i * 5))) >> (i * 5);
		if (cur == 0) {
			dmareq_nr = i;
			break;
		}
	}

	if (dmareq_nr == -1)
		return -EAGAIN;

	reg |= (chdat->epnum << (dmareq_nr * 5));
	if (chdat->tx)
		reg |= ((1 << 4) << (dmareq_nr * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	chdat->dma_data = &chdat->tusb_dma->dma_pool[dmareq_nr];

	return 0;
}

static inline void tusb_omap_dma_free_dmareq(struct tusb_omap_dma_ch *chdat)
{
	u32 reg;

	if (!chdat || !chdat->dma_data || chdat->dma_data->dmareq < 0)
		return;

	reg = musb_readl(chdat->tbase, TUSB_DMA_EP_MAP);
	reg &= ~(0x1f << (chdat->dma_data->dmareq * 5));
	musb_writel(chdat->tbase, TUSB_DMA_EP_MAP, reg);

	chdat->dma_data = NULL;
}

static struct dma_channel *dma_channel_pool[MAX_DMAREQ];

static struct dma_channel *
tusb_omap_dma_allocate(struct dma_controller *c,
		struct musb_hw_ep *hw_ep,
		u8 tx)
{
	int ret, i;
	struct tusb_omap_dma	*tusb_dma;
	struct musb		*musb;
	struct dma_channel	*channel = NULL;
	struct tusb_omap_dma_ch	*chdat = NULL;
	struct tusb_dma_data	*dma_data = NULL;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	musb = tusb_dma->controller.musb;

	/* REVISIT: Why does dmareq5 not work? */
	if (hw_ep->epnum == 0) {
		dev_dbg(musb->controller, "Not allowing DMA for ep0 %s\n", tx ? "tx" : "rx");
		return NULL;
	}

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch->status == MUSB_DMA_STATUS_UNKNOWN) {
			ch->status = MUSB_DMA_STATUS_FREE;
			channel = ch;
			chdat = ch->private_data;
			break;
		}
	}

	if (!channel)
		return NULL;

	chdat->musb = tusb_dma->controller.musb;
	chdat->tbase = tusb_dma->tbase;
	chdat->hw_ep = hw_ep;
	chdat->epnum = hw_ep->epnum;
	chdat->completed_len = 0;
	chdat->tusb_dma = tusb_dma;
	if (tx)
		chdat->tx = 1;
	else
		chdat->tx = 0;

	channel->max_len = 0x7fffffff;
	channel->desired_mode = 0;
	channel->actual_len = 0;

	if (!chdat->dma_data) {
		if (tusb_dma->multichannel) {
			ret = tusb_omap_dma_allocate_dmareq(chdat);
			if (ret != 0)
				goto free_dmareq;
		} else {
			chdat->dma_data = &tusb_dma->dma_pool[0];
		}
	}

	dma_data = chdat->dma_data;

	dev_dbg(musb->controller, "ep%i %s dma: %s dmareq%i\n",
		chdat->epnum,
		chdat->tx ? "tx" : "rx",
		tusb_dma->multichannel ? "shared" : "dedicated",
		dma_data->dmareq);

	return channel;

free_dmareq:
	tusb_omap_dma_free_dmareq(chdat);

	dev_dbg(musb->controller, "ep%i: Could not get a DMA channel\n", chdat->epnum);
	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	return NULL;
}

static void tusb_omap_dma_release(struct dma_channel *channel)
{
	struct tusb_omap_dma_ch	*chdat = to_chdat(channel);
	struct musb		*musb = chdat->musb;

	dev_dbg(musb->controller, "Release for ep%i\n", chdat->epnum);

	channel->status = MUSB_DMA_STATUS_UNKNOWN;

	dmaengine_terminate_sync(chdat->dma_data->chan);
	tusb_omap_dma_free_dmareq(chdat);

	channel = NULL;
}

void tusb_dma_controller_destroy(struct dma_controller *c)
{
	struct tusb_omap_dma	*tusb_dma;
	int			i;

	tusb_dma = container_of(c, struct tusb_omap_dma, controller);
	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel *ch = dma_channel_pool[i];
		if (ch) {
			kfree(ch->private_data);
			kfree(ch);
		}

		/* Free up the DMA channels */
		if (tusb_dma && tusb_dma->dma_pool[i].chan)
			dma_release_channel(tusb_dma->dma_pool[i].chan);
	}

	kfree(tusb_dma);
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_destroy);

static int tusb_omap_allocate_dma_pool(struct tusb_omap_dma *tusb_dma)
{
	struct musb *musb = tusb_dma->controller.musb;
	int i;
	int ret = 0;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];

		/*
		 * Request DMA channels:
		 * - one channel in case of non multichannel mode
		 * - MAX_DMAREQ number of channels in multichannel mode
		 */
		if (i == 0 || tusb_dma->multichannel) {
			char ch_name[8];

			sprintf(ch_name, "dmareq%d", i);
			dma_data->chan = dma_request_chan(musb->controller,
							  ch_name);
			if (IS_ERR(dma_data->chan)) {
				dev_err(musb->controller,
					"Failed to request %s\n", ch_name);
				ret = PTR_ERR(dma_data->chan);
				goto dma_error;
			}

			dma_data->dmareq = i;
		} else {
			dma_data->dmareq = -1;
		}
	}

	return 0;

dma_error:
	for (; i >= 0; i--) {
		struct tusb_dma_data *dma_data = &tusb_dma->dma_pool[i];

		if (dma_data->dmareq >= 0)
			dma_release_channel(dma_data->chan);
	}

	return ret;
}

struct dma_controller *
tusb_dma_controller_create(struct musb *musb, void __iomem *base)
{
	void __iomem *tbase = musb->ctrl_base;
	struct tusb_omap_dma *tusb_dma;
	int i;

	/* REVISIT: Get dmareq lines used from board-*.c */

	musb_writel(musb->ctrl_base, TUSB_DMA_INT_MASK, 0x7fffffff);
	musb_writel(musb->ctrl_base, TUSB_DMA_EP_MAP, 0);

	musb_writel(tbase, TUSB_DMA_REQ_CONF,
		TUSB_DMA_REQ_CONF_BURST_SIZE(2)
		| TUSB_DMA_REQ_CONF_DMA_REQ_EN(0x3f)
		| TUSB_DMA_REQ_CONF_DMA_REQ_ASSER(2));

	tusb_dma = kzalloc(sizeof(struct tusb_omap_dma), GFP_KERNEL);
	if (!tusb_dma)
		goto out;

	tusb_dma->controller.musb = musb;
	tusb_dma->tbase = musb->ctrl_base;

	tusb_dma->controller.channel_alloc = tusb_omap_dma_allocate;
	tusb_dma->controller.channel_release = tusb_omap_dma_release;
	tusb_dma->controller.channel_program = tusb_omap_dma_program;
	tusb_dma->controller.channel_abort = tusb_omap_dma_abort;

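	/* Use a dedicated dmareq per channel (multichannel) only on TUSB rev 3.0 and later */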
	if (musb->tusb_revision >= TUSB_REV_30)
		tusb_dma->multichannel = 1;

	for (i = 0; i < MAX_DMAREQ; i++) {
		struct dma_channel	*ch;
		struct tusb_omap_dma_ch	*chdat;

		ch = kzalloc(sizeof(struct dma_channel), GFP_KERNEL);
		if (!ch)
			goto cleanup;

		dma_channel_pool[i] = ch;

		chdat = kzalloc(sizeof(struct tusb_omap_dma_ch), GFP_KERNEL);
		if (!chdat)
			goto cleanup;

		ch->status = MUSB_DMA_STATUS_UNKNOWN;
		ch->private_data = chdat;
	}

	if (tusb_omap_allocate_dma_pool(tusb_dma))
		goto cleanup;

	return &tusb_dma->controller;

cleanup:
	musb_dma_controller_destroy(&tusb_dma->controller);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(tusb_dma_controller_create);