// SPDX-License-Identifier: GPL-2.0
// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)

/*
 * Synopsys DesignWare AXI DMA Controller driver.
 *
 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
 */

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/dmapool.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-64-nonatomic-lo-hi.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "dw-axi-dmac.h"
#include "../dmaengine.h"
#include "../virt-dma.h"

/*
 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
 * master data bus width up to 512 bits (for both AXI master interfaces), but
 * it depends on IP block configuration.
 */
#define AXI_DMA_BUSWIDTHS		  \
	(DMA_SLAVE_BUSWIDTH_1_BYTE	| \
	DMA_SLAVE_BUSWIDTH_2_BYTES	| \
	DMA_SLAVE_BUSWIDTH_4_BYTES	| \
	DMA_SLAVE_BUSWIDTH_8_BYTES	| \
	DMA_SLAVE_BUSWIDTH_16_BYTES	| \
	DMA_SLAVE_BUSWIDTH_32_BYTES	| \
	DMA_SLAVE_BUSWIDTH_64_BYTES)

#define AXI_DMA_FLAG_HAS_APB_REGS	BIT(0)
#define AXI_DMA_FLAG_HAS_RESETS		BIT(1)
#define AXI_DMA_FLAG_USE_CFG2		BIT(2)

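/*
 * Register access helpers. Common DMAC registers are reached through
 * chip->regs, per-channel registers through chan->chan_regs.
 */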
static inline void
axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
{
	iowrite32(val, chip->regs + reg);
}

static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
{
	return ioread32(chip->regs + reg);
}

static inline void
axi_dma_iowrite64(struct axi_dma_chip *chip, u32 reg, u64 val)
{
	iowrite64(val, chip->regs + reg);
}

static inline u64 axi_dma_ioread64(struct axi_dma_chip *chip, u32 reg)
{
	return ioread64(chip->regs + reg);
}

static inline void
axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
{
	iowrite32(val, chan->chan_regs + reg);
}

static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
{
	return ioread32(chan->chan_regs + reg);
}

static inline void
axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
{
	/*
	 * Split the 64-bit write into two 32-bit writes, as some hardware
	 * doesn't support 64-bit access.
	 */
	iowrite32(lower_32_bits(val), chan->chan_regs + reg);
	iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
}

static inline void axi_chan_config_write(struct axi_dma_chan *chan,
					 struct axi_dma_chan_config *config)
{
	u32 cfg_lo, cfg_hi;

	cfg_lo = (config->dst_multblk_type << CH_CFG_L_DST_MULTBLK_TYPE_POS |
		  config->src_multblk_type << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
	if (chan->chip->dw->hdata->reg_map_8_channels &&
	    !chan->chip->dw->hdata->use_cfg2) {
		cfg_hi = config->tt_fc << CH_CFG_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG_H_HS_SEL_DST_POS |
			 config->src_per << CH_CFG_H_SRC_PER_POS |
			 config->dst_per << CH_CFG_H_DST_PER_POS |
			 config->prior << CH_CFG_H_PRIORITY_POS;
	} else {
		cfg_lo |= config->src_per << CH_CFG2_L_SRC_PER_POS |
			  config->dst_per << CH_CFG2_L_DST_PER_POS;
		cfg_hi = config->tt_fc << CH_CFG2_H_TT_FC_POS |
			 config->hs_sel_src << CH_CFG2_H_HS_SEL_SRC_POS |
			 config->hs_sel_dst << CH_CFG2_H_HS_SEL_DST_POS |
			 config->prior << CH_CFG2_H_PRIORITY_POS;
	}
	axi_chan_iowrite32(chan, CH_CFG_L, cfg_lo);
	axi_chan_iowrite32(chan, CH_CFG_H, cfg_hi);
}

static inline void axi_dma_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= DMAC_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val &= ~INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
{
	u32 val;

	val = axi_dma_ioread32(chip, DMAC_CFG);
	val |= INT_EN_MASK;
	axi_dma_iowrite32(chip, DMAC_CFG, val);
}

static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
{
	u32 val;

	if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
	} else {
		val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
		val &= ~irq_mask;
		axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
	}
}

static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
}

static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
}

static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
{
	axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
}

static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
{
	return axi_chan_ioread32(chan, CH_INTSTATUS);
}

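/*
 * On controllers with more than 16 channels the enable/write-enable bits for
 * channels 16..31 live one register block higher (DMAC_CHAN_BLOCK_SHIFT),
 * hence the 64-bit accesses and the extra shifts below.
 */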
static inline void axi_chan_disable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
		if (chan->chip->dw->hdata->reg_map_8_channels)
			val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		else
			val |= BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}

static inline void axi_chan_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_EN2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHEN, val);
	} else {
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_EN2_WE_SHIFT;
		}
		axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
	}
}

static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16)
		val = axi_dma_ioread64(chan->chip, DMAC_CHEN);
	else
		val = axi_dma_ioread32(chan->chip, DMAC_CHEN);

	if (chan->id >= DMAC_CHAN_16)
		return !!(val & ((u64)(BIT(chan->id) >> DMAC_CHAN_16) << DMAC_CHAN_BLOCK_SHIFT));
	else
		return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
}

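/*
 * Put every channel into a known state (IRQs masked, channel disabled) and
 * set the 64-bit DMA mask at init time.
 */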
static void axi_dma_hw_init(struct axi_dma_chip *chip)
{
	int ret;
	u32 i;

	for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
		axi_chan_disable(&chip->dw->chan[i]);
	}
	ret = dma_set_mask_and_coherent(chip->dev, DMA_BIT_MASK(64));
	if (ret)
		dev_warn(chip->dev, "Unable to set coherent mask\n");
}

static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
				   dma_addr_t dst, size_t len)
{
	u32 max_width = chan->chip->dw->hdata->m_data_width;

	return __ffs(src | dst | len | BIT(max_width));
}

static inline const char *axi_chan_name(struct axi_dma_chan *chan)
{
	return dma_chan_name(&chan->vc.chan);
}

static struct axi_dma_desc *axi_desc_alloc(u32 num)
{
	struct axi_dma_desc *desc;

	desc = kzalloc_obj(*desc, GFP_NOWAIT);
	if (!desc)
		return NULL;

	desc->hw_desc = kzalloc_objs(*desc->hw_desc, num, GFP_NOWAIT);
	if (!desc->hw_desc) {
		kfree(desc);
		return NULL;
	}
	desc->nr_hw_descs = num;

	return desc;
}

static struct axi_dma_lli *axi_desc_get(struct axi_dma_chan *chan,
					dma_addr_t *addr)
{
	struct axi_dma_lli *lli;
	dma_addr_t phys;

	lli = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
	if (unlikely(!lli)) {
		dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
			axi_chan_name(chan));
		return NULL;
	}

	atomic_inc(&chan->descs_allocated);
	*addr = phys;

	return lli;
}

static void axi_desc_put(struct axi_dma_desc *desc)
{
	struct axi_dma_chan *chan = desc->chan;
	int count = desc->nr_hw_descs;
	struct axi_dma_hw_desc *hw_desc;
	int descs_put;

	for (descs_put = 0; descs_put < count; descs_put++) {
		hw_desc = &desc->hw_desc[descs_put];
		dma_pool_free(chan->desc_pool, hw_desc->lli, hw_desc->llp);
	}

	kfree(desc->hw_desc);
	kfree(desc);
	atomic_sub(descs_put, &chan->descs_allocated);
	dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
		 axi_chan_name(chan), descs_put,
		 atomic_read(&chan->descs_allocated));
}

static void vchan_desc_put(struct virt_dma_desc *vdesc)
{
	axi_desc_put(vd_to_axi_desc(vdesc));
}

static enum dma_status
dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
		   struct dma_tx_state *txstate)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct virt_dma_desc *vdesc;
	enum dma_status status;
	u32 completed_length;
	unsigned long flags;
	u32 completed_blocks;
	size_t bytes = 0;
	u32 length;
	u32 len;

	status = dma_cookie_status(dchan, cookie, txstate);
	if (status == DMA_COMPLETE || !txstate)
		return status;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vdesc = vchan_find_desc(&chan->vc, cookie);
	if (vdesc) {
		length = vd_to_axi_desc(vdesc)->length;
		completed_blocks = vd_to_axi_desc(vdesc)->completed_blocks;
		len = vd_to_axi_desc(vdesc)->hw_desc[0].len;
		completed_length = completed_blocks * len;
		bytes = length - completed_length;
	}

	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dma_set_residue(txstate, bytes);

	return status;
}

static void write_desc_llp(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->llp = cpu_to_le64(adr);
}

static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
{
	axi_chan_iowrite64(chan, CH_LLP, adr);
}

static void dw_axi_dma_set_byte_halfword(struct axi_dma_chan *chan, bool set)
{
	u32 offset = DMAC_APB_BYTE_WR_CH_EN;
	u32 reg_width, val;

	if (!chan->chip->apb_regs) {
		dev_dbg(chan->chip->dev, "apb_regs not initialized\n");
		return;
	}

	reg_width = __ffs(chan->config.dst_addr_width);
	if (reg_width == DWAXIDMAC_TRANS_WIDTH_16)
		offset = DMAC_APB_HALFWORD_WR_CH_EN;

	val = ioread32(chan->chip->apb_regs + offset);

	if (set)
		val |= BIT(chan->id);
	else
		val &= ~BIT(chan->id);

	iowrite32(val, chan->chip->apb_regs + offset);
}

/* Called in chan locked context */
static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
				      struct axi_dma_desc *first)
{
	u32 priority = chan->chip->dw->hdata->priority[chan->id];
	struct axi_dma_chan_config config = {};
	u32 irq_mask;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));

		return;
	}

	axi_dma_enable(chan->chip);

	config.dst_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.src_multblk_type = DWAXIDMAC_MBLK_TYPE_LL;
	config.tt_fc = DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC;
	config.prior = priority;
	config.hs_sel_dst = DWAXIDMAC_HS_SEL_HW;
	config.hs_sel_src = DWAXIDMAC_HS_SEL_HW;
	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		dw_axi_dma_set_byte_halfword(chan, true);
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_MEM_TO_PER_DST :
				DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC;
		if (chan->chip->apb_regs)
			config.dst_per = chan->id;
		else
			config.dst_per = chan->hw_handshake_num;
		break;
	case DMA_DEV_TO_MEM:
		config.tt_fc = chan->config.device_fc ?
				DWAXIDMAC_TT_FC_PER_TO_MEM_SRC :
				DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC;
		if (chan->chip->apb_regs)
			config.src_per = chan->id;
		else
			config.src_per = chan->hw_handshake_num;
		break;
	default:
		break;
	}
	axi_chan_config_write(chan, &config);

	write_chan_llp(chan, first->hw_desc[0].llp | lms);

	irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
	axi_chan_irq_sig_set(chan, irq_mask);

	/* Generate 'suspend' status but don't generate interrupt */
	irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
	axi_chan_irq_set(chan, irq_mask);

	axi_chan_enable(chan);
}

static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
{
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;

	vd = vchan_next_desc(&chan->vc);
	if (!vd)
		return;

	desc = vd_to_axi_desc(vd);
	dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
		 vd->tx.cookie);
	axi_chan_block_xfer_start(chan, desc);
}

static void dma_chan_issue_pending(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (vchan_issue_pending(&chan->vc))
		axi_chan_start_first_queued(chan);
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static void dw_axi_dma_synchronize(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	vchan_synchronize(&chan->vc);
}

static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan)) {
		dev_err(chan2dev(chan), "%s is non-idle!\n",
			axi_chan_name(chan));
		return -EBUSY;
	}

	/* LLI address must be aligned to a 64-byte boundary */
	chan->desc_pool = dma_pool_create(dev_name(chan2dev(chan)),
					  chan->chip->dev,
					  sizeof(struct axi_dma_lli),
					  64, 0);
	if (!chan->desc_pool) {
		dev_err(chan2dev(chan), "No memory for descriptors\n");
		return -ENOMEM;
	}
	dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));

	pm_runtime_get(chan->chip->dev);

	return 0;
}

static void dma_chan_free_chan_resources(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	/* ASSERT: channel is idle */
	if (axi_chan_is_hw_enable(chan))
		dev_err(dchan2dev(dchan), "%s is non-idle!\n",
			axi_chan_name(chan));

	axi_chan_disable(chan);
	axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);

	vchan_free_chan_resources(&chan->vc);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	dev_vdbg(dchan2dev(dchan),
		 "%s: free resources, descriptor still allocated: %u\n",
		 axi_chan_name(chan), atomic_read(&chan->descs_allocated));

	pm_runtime_put(chan->chip->dev);
}

static void dw_axi_dma_set_hw_channel(struct axi_dma_chan *chan, bool set)
{
	struct axi_dma_chip *chip = chan->chip;
	unsigned long reg_value, val;

	if (!chip->apb_regs) {
		dev_err(chip->dev, "apb_regs not initialized\n");
		return;
	}

	/*
	 * An unused DMA channel has a default value of 0x3F.
	 * Lock the DMA channel by assigning a handshake number to it;
	 * unlock it by writing 0x3F back.
	 */
	if (set)
		val = chan->hw_handshake_num;
	else
		val = UNUSED_CHANNEL;

	reg_value = lo_hi_readq(chip->apb_regs + DMAC_APB_HW_HS_SEL_0);

	/*
	 * The channel is already allocated; program its handshake selection
	 * field. A single 64-bit write covers the fields of all 8 channels.
	 */
	reg_value &= ~(DMA_APB_HS_SEL_MASK <<
			(chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	reg_value |= (val << (chan->id * DMA_APB_HS_SEL_BIT_SIZE));
	lo_hi_writeq(reg_value, chip->apb_regs + DMAC_APB_HW_HS_SEL_0);
}


/*
 * If DW_axi_dmac sees CHx_CTL.ShadowReg_Or_LLI_Last bit of the fetched LLI
 * as 1, it understands that the current block is the final block in the
 * transfer and completes the DMA transfer operation at the end of current
 * block transfer.
 */
static void set_desc_last(struct axi_dma_hw_desc *desc)
{
	u32 val;

	val = le32_to_cpu(desc->lli->ctl_hi);
	val |= CH_CTL_H_LLI_LAST;
	desc->lli->ctl_hi = cpu_to_le32(val);
}

static void write_desc_sar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->sar = cpu_to_le64(adr);
}

static void write_desc_dar(struct axi_dma_hw_desc *desc, dma_addr_t adr)
{
	desc->lli->dar = cpu_to_le64(adr);
}

static void set_desc_src_master(struct axi_dma_hw_desc *desc)
{
	u32 val;

	/* Select AXI0 for source master */
	val = le32_to_cpu(desc->lli->ctl_lo);
	val &= ~CH_CTL_L_SRC_MAST;
	desc->lli->ctl_lo = cpu_to_le32(val);
}

static void set_desc_dest_master(struct axi_dma_hw_desc *hw_desc,
				 struct axi_dma_desc *desc)
{
	u32 val;

	/* Select AXI1 for the destination master if a second master is available */
	val = le32_to_cpu(hw_desc->lli->ctl_lo);
	if (desc->chan->chip->dw->hdata->nr_masters > 1)
		val |= CH_CTL_L_DST_MAST;
	else
		val &= ~CH_CTL_L_DST_MAST;

	hw_desc->lli->ctl_lo = cpu_to_le32(val);
}

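/*
 * Program one hardware LLI for a slave-transfer segment: choose the transfer
 * widths, set addresses and block size, and apply the optional AXI burst
 * length restriction.
 */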
static int dw_axi_dma_set_hw_desc(struct axi_dma_chan *chan,
				  struct axi_dma_hw_desc *hw_desc,
				  dma_addr_t mem_addr, size_t len)
{
	unsigned int data_width = BIT(chan->chip->dw->hdata->m_data_width);
	unsigned int reg_width;
	unsigned int mem_width;
	dma_addr_t device_addr;
	size_t axi_block_ts;
	size_t block_ts;
	u32 ctllo, ctlhi;
	u32 burst_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	mem_width = __ffs(data_width | mem_addr | len);
	if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
		mem_width = DWAXIDMAC_TRANS_WIDTH_32;

	if (!IS_ALIGNED(mem_addr, 4)) {
		dev_err(chan->chip->dev, "invalid buffer alignment\n");
		return -EINVAL;
	}

	switch (chan->direction) {
	case DMA_MEM_TO_DEV:
		reg_width = __ffs(chan->config.dst_addr_width);
		device_addr = chan->config.dst_addr;
		ctllo = reg_width << CH_CTL_L_DST_WIDTH_POS |
			mem_width << CH_CTL_L_SRC_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		device_addr = chan->config.src_addr;
		ctllo = reg_width << CH_CTL_L_SRC_WIDTH_POS |
			mem_width << CH_CTL_L_DST_WIDTH_POS |
			DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
			DWAXIDMAC_CH_CTL_L_NOINC << CH_CTL_L_SRC_INC_POS;
		block_ts = len >> reg_width;
		break;
	default:
		return -EINVAL;
	}

	if (block_ts > axi_block_ts)
		return -EINVAL;

	hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
	if (unlikely(!hw_desc->lli))
		return -ENOMEM;

	ctlhi = CH_CTL_H_LLI_VALID;

	if (chan->chip->dw->hdata->restrict_axi_burst_len) {
		burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
		ctlhi |= CH_CTL_H_ARLEN_EN | CH_CTL_H_AWLEN_EN |
			 burst_len << CH_CTL_H_ARLEN_POS |
			 burst_len << CH_CTL_H_AWLEN_POS;
	}

	hw_desc->lli->ctl_hi = cpu_to_le32(ctlhi);

	if (chan->direction == DMA_MEM_TO_DEV) {
		write_desc_sar(hw_desc, mem_addr);
		write_desc_dar(hw_desc, device_addr);
	} else {
		write_desc_sar(hw_desc, device_addr);
		write_desc_dar(hw_desc, mem_addr);
	}

	hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

	ctllo |= DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS;
	hw_desc->lli->ctl_lo = cpu_to_le32(ctllo);

	set_desc_src_master(hw_desc);

	hw_desc->len = len;
	return 0;
}

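/*
 * Maximum number of bytes one hardware block can carry on this channel,
 * derived from the per-channel block-size limit and the transfer width.
 */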
static size_t calculate_block_len(struct axi_dma_chan *chan,
				  dma_addr_t dma_addr, size_t buf_len,
				  enum dma_transfer_direction direction)
{
	u32 data_width, reg_width, mem_width;
	size_t axi_block_ts, block_len;

	axi_block_ts = chan->chip->dw->hdata->block_size[chan->id];

	switch (direction) {
	case DMA_MEM_TO_DEV:
		data_width = BIT(chan->chip->dw->hdata->m_data_width);
		mem_width = __ffs(data_width | dma_addr | buf_len);
		if (mem_width > DWAXIDMAC_TRANS_WIDTH_32)
			mem_width = DWAXIDMAC_TRANS_WIDTH_32;

		block_len = axi_block_ts << mem_width;
		break;
	case DMA_DEV_TO_MEM:
		reg_width = __ffs(chan->config.src_addr_width);
		block_len = axi_block_ts << reg_width;
		break;
	default:
		block_len = 0;
	}

	return block_len;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_cyclic(struct dma_chan *dchan, dma_addr_t dma_addr,
			    size_t buf_len, size_t period_len,
			    enum dma_transfer_direction direction,
			    unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	dma_addr_t src_addr = dma_addr;
	u32 num_periods, num_segments;
	size_t axi_block_len;
	u32 total_segments;
	u32 segment_len;
	unsigned int i;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	num_periods = buf_len / period_len;

	axi_block_len = calculate_block_len(chan, dma_addr, buf_len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_segments = DIV_ROUND_UP(period_len, axi_block_len);
	segment_len = DIV_ROUND_UP(period_len, num_segments);

	total_segments = num_periods * num_segments;

	desc = axi_desc_alloc(total_segments);
	if (unlikely(!desc))
		goto err_desc_get;

	chan->direction = direction;
	desc->chan = chan;
	chan->cyclic = true;
	desc->length = 0;
	desc->period_len = period_len;

	for (i = 0; i < total_segments; i++) {
		hw_desc = &desc->hw_desc[i];

		status = dw_axi_dma_set_hw_desc(chan, hw_desc, src_addr,
						segment_len);
		if (status < 0)
			goto err_desc_get;

		desc->length += hw_desc->len;
		/* Set end-of-link to the linked descriptor, so that cyclic
		 * callback function can be triggered during interrupt.
		 */
		set_desc_last(hw_desc);

		src_addr += segment_len;
	}

	llp = desc->hw_desc[0].llp;

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--total_segments];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (total_segments);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dw_axi_dma_chan_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
			      unsigned int sg_len,
			      enum dma_transfer_direction direction,
			      unsigned long flags, void *context)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 num_segments, segment_len;
	unsigned int loop = 0;
	struct scatterlist *sg;
	size_t axi_block_len;
	u32 len, num_sgs;
	unsigned int i;
	dma_addr_t mem;
	int status;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	if (unlikely(!is_slave_direction(direction) || !sg_len))
		return NULL;

	mem = sg_dma_address(sgl);
	len = sg_dma_len(sgl);

	axi_block_len = calculate_block_len(chan, mem, len, direction);
	if (axi_block_len == 0)
		return NULL;

	num_sgs = sg_nents_for_dma(sgl, sg_len, axi_block_len);
	desc = axi_desc_alloc(num_sgs);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	desc->length = 0;
	chan->direction = direction;

	for_each_sg(sgl, sg, sg_len, i) {
		mem = sg_dma_address(sg);
		len = sg_dma_len(sg);
		num_segments = DIV_ROUND_UP(sg_dma_len(sg), axi_block_len);
		segment_len = DIV_ROUND_UP(sg_dma_len(sg), num_segments);

		do {
			hw_desc = &desc->hw_desc[loop++];
			status = dw_axi_dma_set_hw_desc(chan, hw_desc, mem, segment_len);
			if (status < 0)
				goto err_desc_get;

			desc->length += hw_desc->len;
			len -= segment_len;
			mem += segment_len;
		} while (len >= segment_len);
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num_sgs - 1]);

	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num_sgs];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num_sgs);

	dw_axi_dma_set_hw_channel(chan, true);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);

	return NULL;
}

static struct dma_async_tx_descriptor *
dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
			 dma_addr_t src_adr, size_t len, unsigned long flags)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	size_t block_ts, max_block_ts, xfer_len;
	struct axi_dma_hw_desc *hw_desc = NULL;
	struct axi_dma_desc *desc = NULL;
	u32 xfer_width, reg, num;
	u64 llp = 0;
	u8 lms = 0; /* Select AXI0 master for LLI fetching */

	dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
		axi_chan_name(chan), &src_adr, &dst_adr, len, flags);

	max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
	xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, len);
	num = DIV_ROUND_UP(len, max_block_ts << xfer_width);
	desc = axi_desc_alloc(num);
	if (unlikely(!desc))
		goto err_desc_get;

	desc->chan = chan;
	num = 0;
	desc->length = 0;
	while (len) {
		xfer_len = len;

		hw_desc = &desc->hw_desc[num];
		/*
		 * Take care of the alignment.
		 * The source and destination widths can actually differ, but
		 * we keep them the same here for simplicity.
		 */
		xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);

		/*
		 * block_ts indicates the total number of data of width
		 * to be transferred in a DMA block transfer.
		 * BLOCK_TS register should be set to block_ts - 1
		 */
		block_ts = xfer_len >> xfer_width;
		if (block_ts > max_block_ts) {
			block_ts = max_block_ts;
			xfer_len = max_block_ts << xfer_width;
		}

		hw_desc->lli = axi_desc_get(chan, &hw_desc->llp);
		if (unlikely(!hw_desc->lli))
			goto err_desc_get;

		write_desc_sar(hw_desc, src_adr);
		write_desc_dar(hw_desc, dst_adr);
		hw_desc->lli->block_ts_lo = cpu_to_le32(block_ts - 1);

		reg = CH_CTL_H_LLI_VALID;
		if (chan->chip->dw->hdata->restrict_axi_burst_len) {
			u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;

			reg |= (CH_CTL_H_ARLEN_EN |
				burst_len << CH_CTL_H_ARLEN_POS |
				CH_CTL_H_AWLEN_EN |
				burst_len << CH_CTL_H_AWLEN_POS);
		}
		hw_desc->lli->ctl_hi = cpu_to_le32(reg);

		reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
		       DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
		       xfer_width << CH_CTL_L_DST_WIDTH_POS |
		       xfer_width << CH_CTL_L_SRC_WIDTH_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
		       DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
		hw_desc->lli->ctl_lo = cpu_to_le32(reg);

		set_desc_src_master(hw_desc);
		set_desc_dest_master(hw_desc, desc);

		hw_desc->len = xfer_len;
		desc->length += hw_desc->len;
		/* update the length and addresses for the next loop cycle */
		len -= xfer_len;
		dst_adr += xfer_len;
		src_adr += xfer_len;
		num++;
	}

	/* Set end-of-link to the last link descriptor of list */
	set_desc_last(&desc->hw_desc[num - 1]);
	/* Managed transfer list */
	do {
		hw_desc = &desc->hw_desc[--num];
		write_desc_llp(hw_desc, llp | lms);
		llp = hw_desc->llp;
	} while (num);

	return vchan_tx_prep(&chan->vc, &desc->vd, flags);

err_desc_get:
	if (desc)
		axi_desc_put(desc);
	return NULL;
}

static int dw_axi_dma_chan_slave_config(struct dma_chan *dchan,
					struct dma_slave_config *config)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);

	memcpy(&chan->config, config, sizeof(*config));

	return 0;
}

static void axi_chan_dump_lli(struct axi_dma_chan *chan,
			      struct axi_dma_hw_desc *desc)
{
	if (!desc->lli) {
		dev_err(dchan2dev(&chan->vc.chan), "NULL LLI\n");
		return;
	}

	dev_err(dchan2dev(&chan->vc.chan),
		"SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
		le64_to_cpu(desc->lli->sar),
		le64_to_cpu(desc->lli->dar),
		le64_to_cpu(desc->lli->llp),
		le32_to_cpu(desc->lli->block_ts_lo),
		le32_to_cpu(desc->lli->ctl_hi),
		le32_to_cpu(desc->lli->ctl_lo));
}

static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
				   struct axi_dma_desc *desc_head)
{
	int count = atomic_read(&chan->descs_allocated);
	int i;

	for (i = 0; i < count; i++)
		axi_chan_dump_lli(chan, &desc_head->hw_desc[i]);
}

static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
{
	struct virt_dma_desc *vd;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	axi_chan_disable(chan);

	/* The bad descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}
	/* Remove the completed descriptor from issued list */
	list_del(&vd->node);

	/* WARN about bad descriptor */
	dev_err(chan2dev(chan),
		"Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
		axi_chan_name(chan), vd->tx.cookie, status);
	axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));

	vchan_cookie_complete(vd);

	/* Try to restart the controller */
	axi_chan_start_first_queued(chan);

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

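/*
 * DWAXIDMAC_IRQ_DMA_TRF handler. For cyclic transfers the descriptor stays on
 * the list, the completed block is accounted and the channel is re-enabled;
 * otherwise the descriptor is completed and the next queued one is started.
 */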
static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
{
	int count = atomic_read(&chan->descs_allocated);
	struct axi_dma_hw_desc *hw_desc;
	struct axi_dma_desc *desc;
	struct virt_dma_desc *vd;
	unsigned long flags;
	u64 llp;
	int i;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (unlikely(axi_chan_is_hw_enable(chan))) {
		dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
			axi_chan_name(chan));
		axi_chan_disable(chan);
	}

	/* The completed descriptor currently is in the head of vc list */
	vd = vchan_next_desc(&chan->vc);
	if (!vd) {
		dev_err(chan2dev(chan), "BUG: %s, IRQ with no descriptors\n",
			axi_chan_name(chan));
		goto out;
	}

	if (chan->cyclic) {
		desc = vd_to_axi_desc(vd);
		if (desc) {
			llp = lo_hi_readq(chan->chan_regs + CH_LLP);
			for (i = 0; i < count; i++) {
				hw_desc = &desc->hw_desc[i];
				if (hw_desc->llp == llp) {
					axi_chan_irq_clear(chan, hw_desc->lli->status_lo);
					hw_desc->lli->ctl_hi |= CH_CTL_H_LLI_VALID;
					desc->completed_blocks = i;

					if (((hw_desc->len * (i + 1)) % desc->period_len) == 0)
						vchan_cyclic_callback(vd);
					break;
				}
			}

			axi_chan_enable(chan);
		}
	} else {
		/* Remove the completed descriptor from issued list before completing */
		list_del(&vd->node);
		vchan_cookie_complete(vd);

		/* Submit queued descriptors after processing the completed ones */
		axi_chan_start_first_queued(chan);
	}

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
{
	struct axi_dma_chip *chip = dev_id;
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan;

	u32 status, i;

	/* Disable DMAC interrupts. We'll enable them after processing channels */
	axi_dma_irq_disable(chip);

	/* Poll, clear and process every channel interrupt status */
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		chan = &dw->chan[i];
		status = axi_chan_irq_read(chan);
		axi_chan_irq_clear(chan, status);

		dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
			axi_chan_name(chan), i, status);

		if (status & DWAXIDMAC_IRQ_ALL_ERR)
			axi_chan_handle_err(chan, status);
		else if (status & DWAXIDMAC_IRQ_DMA_TRF)
			axi_chan_block_xfer_complete(chan);
	}

	/* Re-enable interrupts */
	axi_dma_irq_enable(chip);

	return IRQ_HANDLED;
}

static int dma_chan_terminate_all(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	u32 chan_active = BIT(chan->id) << DMAC_CHAN_EN_SHIFT;
	unsigned long flags;
	u32 val;
	int ret;
	LIST_HEAD(head);

	axi_chan_disable(chan);

	ret = readl_poll_timeout_atomic(chan->chip->regs + DMAC_CHEN, val,
					!(val & chan_active), 1000, 50000);
	if (ret == -ETIMEDOUT)
		dev_warn(dchan2dev(dchan),
			 "%s failed to stop\n", axi_chan_name(chan));

	if (chan->direction != DMA_MEM_TO_MEM)
		dw_axi_dma_set_hw_channel(chan, false);
	if (chan->direction == DMA_MEM_TO_DEV)
		dw_axi_dma_set_byte_halfword(chan, false);

	spin_lock_irqsave(&chan->vc.lock, flags);

	vchan_get_all_descriptors(&chan->vc, &head);

	chan->cyclic = false;
	spin_unlock_irqrestore(&chan->vc.lock, flags);

	vchan_dma_desc_free_list(&chan->vc, &head);

	dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));

	return 0;
}

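/*
 * Request channel suspension and busy-wait (20 x 2us) for the SUSPENDED
 * status; returns -EAGAIN if the channel never reported suspension.
 */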
static int dma_chan_pause(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;
	unsigned int timeout = 20; /* timeout iterations */
	u64 val;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val |= (u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT) |
				(u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT);
		} else {
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val |= BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT |
			       BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT;
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}

	do {
		if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
			break;

		udelay(2);
	} while (--timeout);

	axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);

	chan->is_paused = true;

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return timeout ? 0 : -EAGAIN;
}

/* Called in chan locked context */
static inline void axi_chan_resume(struct axi_dma_chan *chan)
{
	u64 val;

	if (chan->chip->dw->hdata->nr_channels >= DMAC_CHAN_16) {
		val = axi_dma_ioread64(chan->chip, DMAC_CHSUSPREG);
		if (chan->id >= DMAC_CHAN_16) {
			val &= ~((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
			val |= ((u64)(BIT(chan->id) >> DMAC_CHAN_16)
				<< (DMAC_CHAN_SUSP2_WE_SHIFT + DMAC_CHAN_BLOCK_SHIFT));
		} else {
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
		}
		axi_dma_iowrite64(chan->chip, DMAC_CHSUSPREG, val);
	} else {
		if (chan->chip->dw->hdata->reg_map_8_channels) {
			val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHEN, (u32)val);
		} else {
			val = axi_dma_ioread32(chan->chip, DMAC_CHSUSPREG);
			val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP2_SHIFT);
			val |= (BIT(chan->id) << DMAC_CHAN_SUSP2_WE_SHIFT);
			axi_dma_iowrite32(chan->chip, DMAC_CHSUSPREG, (u32)val);
		}
	}

	chan->is_paused = false;
}

static int dma_chan_resume(struct dma_chan *dchan)
{
	struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);

	if (chan->is_paused)
		axi_chan_resume(chan);

	spin_unlock_irqrestore(&chan->vc.lock, flags);

	return 0;
}

static int axi_dma_suspend(struct axi_dma_chip *chip)
{
	axi_dma_irq_disable(chip);
	axi_dma_disable(chip);

	clk_disable_unprepare(chip->core_clk);
	clk_disable_unprepare(chip->cfgr_clk);

	return 0;
}

static int axi_dma_resume(struct axi_dma_chip *chip)
{
	int ret;

	ret = clk_prepare_enable(chip->cfgr_clk);
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(chip->core_clk);
	if (ret < 0)
		return ret;

	axi_dma_enable(chip);
	axi_dma_irq_enable(chip);

	return 0;
}

static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_suspend(chip);
}

static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
{
	struct axi_dma_chip *chip = dev_get_drvdata(dev);

	return axi_dma_resume(chip);
}

static struct dma_chan *dw_axi_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct dw_axi_dma *dw = ofdma->of_dma_data;
	struct axi_dma_chan *chan;
	struct dma_chan *dchan;

	dchan = dma_get_any_slave_channel(&dw->dma);
	if (!dchan)
		return NULL;

	chan = dchan_to_axi_dma_chan(dchan);
	chan->hw_handshake_num = dma_spec->args[0];
	return dchan;
}

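/*
 * Parse the hardware description from firmware properties. Purely as an
 * illustration (the values below are made up; only the property names match
 * what this function and the clock/IRQ lookups actually read), a device tree
 * node could look roughly like:
 *
 *	dma-controller@80000 {
 *		compatible = "snps,axi-dma-1.01a";
 *		reg = <0x80000 0x1000>;
 *		interrupts = <27>;
 *		clocks = <&core_clk>, <&cfgr_clk>;
 *		clock-names = "core-clk", "cfgr-clk";
 *		#dma-cells = <1>;
 *		dma-channels = <4>;
 *		snps,dma-masters = <2>;
 *		snps,data-width = <3>;
 *		snps,block-size = <4096 4096 4096 4096>;
 *		snps,priority = <0 1 2 3>;
 *		snps,axi-max-burst-len = <16>;
 *	};
 */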
static int parse_device_properties(struct axi_dma_chip *chip)
{
	struct device *dev = chip->dev;
	u32 tmp, carr[DMAC_MAX_CHANNELS];
	int ret;

	ret = device_property_read_u32(dev, "dma-channels", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
		return -EINVAL;

	chip->dw->hdata->nr_channels = tmp;
	if (tmp <= DMA_REG_MAP_CH_REF)
		chip->dw->hdata->reg_map_8_channels = true;

	ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
	if (ret)
		return ret;
	if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
		return -EINVAL;

	chip->dw->hdata->nr_masters = tmp;

	ret = device_property_read_u32(dev, "snps,data-width", &tmp);
	if (ret)
		return ret;
	if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
		return -EINVAL;

	chip->dw->hdata->m_data_width = tmp;

	ret = device_property_read_u32_array(dev, "snps,block-size", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
			return -EINVAL;

		chip->dw->hdata->block_size[tmp] = carr[tmp];
	}

	ret = device_property_read_u32_array(dev, "snps,priority", carr,
					     chip->dw->hdata->nr_channels);
	if (ret)
		return ret;
	/* Priority value must be programmed within [0:nr_channels-1] range */
	for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
		if (carr[tmp] >= chip->dw->hdata->nr_channels)
			return -EINVAL;

		chip->dw->hdata->priority[tmp] = carr[tmp];
	}

	/* axi-max-burst-len is an optional property */
	ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
	if (!ret) {
		if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
			return -EINVAL;
		if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
			return -EINVAL;

		chip->dw->hdata->restrict_axi_burst_len = true;
		chip->dw->hdata->axi_rw_burst_len = tmp;
	}

	return 0;
}

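/* Request every IRQ line the platform device provides; they all share the same handler. */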
static int axi_req_irqs(struct platform_device *pdev, struct axi_dma_chip *chip)
{
	int irq_count = platform_irq_count(pdev);
	int ret;

	for (int i = 0; i < irq_count; i++) {
		chip->irq[i] = platform_get_irq(pdev, i);
		if (chip->irq[i] < 0)
			return chip->irq[i];
		ret = devm_request_irq(chip->dev, chip->irq[i], dw_axi_dma_interrupt,
				       IRQF_SHARED, KBUILD_MODNAME, chip);
		if (ret < 0)
			return ret;
	}

	return 0;
}

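/*
 * Probe: map registers, apply optional resets, acquire clocks, parse firmware
 * properties, then set up the dmaengine device and register it with the OF
 * DMA translation helpers.
 */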
static int dw_probe(struct platform_device *pdev)
{
	struct axi_dma_chip *chip;
	struct dw_axi_dma *dw;
	struct dw_axi_dma_hcfg *hdata;
	struct reset_control *resets;
	unsigned int flags;
	u32 i;
	int ret;

	chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
	if (!dw)
		return -ENOMEM;

	hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
	if (!hdata)
		return -ENOMEM;

	chip->dw = dw;
	chip->dev = &pdev->dev;
	chip->dw->hdata = hdata;

	chip->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(chip->regs))
		return PTR_ERR(chip->regs);

	flags = (uintptr_t)of_device_get_match_data(&pdev->dev);
	if (flags & AXI_DMA_FLAG_HAS_APB_REGS) {
		chip->apb_regs = devm_platform_ioremap_resource(pdev, 1);
		if (IS_ERR(chip->apb_regs))
			return PTR_ERR(chip->apb_regs);
	}

	if (flags & AXI_DMA_FLAG_HAS_RESETS) {
		resets = devm_reset_control_array_get_exclusive(&pdev->dev);
		if (IS_ERR(resets))
			return PTR_ERR(resets);

		ret = reset_control_deassert(resets);
		if (ret)
			return ret;
	}

	chip->dw->hdata->use_cfg2 = !!(flags & AXI_DMA_FLAG_USE_CFG2);

	chip->core_clk = devm_clk_get(chip->dev, "core-clk");
	if (IS_ERR(chip->core_clk))
		return PTR_ERR(chip->core_clk);

	chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
	if (IS_ERR(chip->cfgr_clk))
		return PTR_ERR(chip->cfgr_clk);

	ret = parse_device_properties(chip);
	if (ret)
		return ret;

	dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
				sizeof(*dw->chan), GFP_KERNEL);
	if (!dw->chan)
		return -ENOMEM;

	ret = axi_req_irqs(pdev, chip);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&dw->dma.channels);
	for (i = 0; i < hdata->nr_channels; i++) {
		struct axi_dma_chan *chan = &dw->chan[i];

		chan->chip = chip;
		chan->id = i;
		chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
		atomic_set(&chan->descs_allocated, 0);

		chan->vc.desc_free = vchan_desc_put;
		vchan_init(&chan->vc, &dw->dma);
	}

	/* Set capabilities */
	dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, dw->dma.cap_mask);
	dma_cap_set(DMA_CYCLIC, dw->dma.cap_mask);

	/* DMA capabilities */
	dw->dma.max_burst = hdata->axi_rw_burst_len;
	dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
	dw->dma.directions = BIT(DMA_MEM_TO_MEM);
	dw->dma.directions |= BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;

	dw->dma.dev = chip->dev;
	dw->dma.device_tx_status = dma_chan_tx_status;
	dw->dma.device_issue_pending = dma_chan_issue_pending;
	dw->dma.device_terminate_all = dma_chan_terminate_all;
	dw->dma.device_pause = dma_chan_pause;
	dw->dma.device_resume = dma_chan_resume;

	dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
	dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;

	dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
	dw->dma.device_synchronize = dw_axi_dma_synchronize;
	dw->dma.device_config = dw_axi_dma_chan_slave_config;
	dw->dma.device_prep_slave_sg = dw_axi_dma_chan_prep_slave_sg;
	dw->dma.device_prep_dma_cyclic = dw_axi_dma_chan_prep_cyclic;

	/*
	 * The Synopsys DesignWare AXI DMA datasheet mentions that the maximum
	 * supported block size is 1024 data items. The device register width
	 * is 4 bytes, so constrain the maximum segment size to 1024 * 4.
	 */
	dw->dma.dev->dma_parms = &dw->dma_parms;
	dma_set_max_seg_size(&pdev->dev, MAX_BLOCK_SIZE);
	platform_set_drvdata(pdev, chip);

	pm_runtime_enable(chip->dev);

	/*
	 * We can't just call pm_runtime_get here instead of
	 * pm_runtime_get_noresume + axi_dma_resume because we need the
	 * driver to work even without Runtime PM.
	 */
	pm_runtime_get_noresume(chip->dev);
	ret = axi_dma_resume(chip);
	if (ret < 0)
		goto err_pm_disable;

	axi_dma_hw_init(chip);

	pm_runtime_put(chip->dev);

	ret = dmaenginem_async_device_register(&dw->dma);
	if (ret)
		goto err_pm_disable;

	/* Register with OF helpers for DMA lookups */
	ret = of_dma_controller_register(pdev->dev.of_node,
					 dw_axi_dma_of_xlate, dw);
	if (ret < 0)
		dev_warn(&pdev->dev,
			 "Failed to register OF DMA controller, fallback to MEM_TO_MEM mode\n");

	dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
		 dw->hdata->nr_channels);

	return 0;

err_pm_disable:
	pm_runtime_disable(chip->dev);

	return ret;
}

static void dw_remove(struct platform_device *pdev)
{
	struct axi_dma_chip *chip = platform_get_drvdata(pdev);
	struct dw_axi_dma *dw = chip->dw;
	struct axi_dma_chan *chan, *_chan;
	u32 i;

	/* Enable the clocks before accessing the registers */
	clk_prepare_enable(chip->cfgr_clk);
	clk_prepare_enable(chip->core_clk);
	axi_dma_irq_disable(chip);
	for (i = 0; i < dw->hdata->nr_channels; i++) {
		axi_chan_disable(&chip->dw->chan[i]);
		axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
	}
	axi_dma_disable(chip);

	pm_runtime_disable(chip->dev);
	axi_dma_suspend(chip);

	for (i = 0; i < DMAC_MAX_CHANNELS; i++)
		if (chip->irq[i] > 0)
			devm_free_irq(chip->dev, chip->irq[i], chip);

	of_dma_controller_free(chip->dev->of_node);

	list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
				 vc.chan.device_node) {
		list_del(&chan->vc.chan.device_node);
		tasklet_kill(&chan->vc.task);
	}
}

static const struct dev_pm_ops dw_axi_dma_pm_ops = {
	SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
};

static const struct of_device_id dw_dma_of_id_table[] = {
	{
		.compatible = "snps,axi-dma-1.01a"
	}, {
		.compatible = "intel,kmb-axi-dma",
		.data = (void *)AXI_DMA_FLAG_HAS_APB_REGS,
	}, {
		.compatible = "starfive,jh7110-axi-dma",
		.data = (void *)(AXI_DMA_FLAG_HAS_RESETS | AXI_DMA_FLAG_USE_CFG2),
	}, {
		.compatible = "starfive,jh8100-axi-dma",
		.data = (void *)AXI_DMA_FLAG_HAS_RESETS,
	},
	{}
};
MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);

static struct platform_driver dw_driver = {
	.probe		= dw_probe,
	.remove		= dw_remove,
	.driver = {
		.name	= KBUILD_MODNAME,
		.of_match_table = dw_dma_of_id_table,
		.pm = &dw_axi_dma_pm_ops,
	},
};
module_platform_driver(dw_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");