// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2012 Marvell International Ltd.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/of_dma.h>
#include <linux/of.h>

#include "dmaengine.h"

#define DCSR		0x0000
#define DALGN		0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DDADRH(n)	(0x0300 + ((n) << 4))
#define DSADRH(n)	(0x0304 + ((n) << 4))
#define DTADRH(n)	(0x0308 + ((n) << 4))
#define DCMD		0x020c

#define DCSR_RUN	BIT(31)	/* Run Bit (read / write) */
#define DCSR_NODESC	BIT(30)	/* No-Descriptor Fetch (read / write) */
#define DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (read / write) */
#define DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define DCSR_BUSERR	BIT(0)	/* Bus Error Interrupt (read / write) */

#define DCSR_EORIRQEN	BIT(28)	/* End of Receive Interrupt Enable (R/W) */
#define DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define DCSR_LPAEEN	BIT(21)	/* Long Physical Address Extension Enable */
#define DCSR_CMPST	BIT(10)	/* The Descriptor Compare Status */
#define DCSR_EORINTR	BIT(9)	/* The end of Receive */

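/*
 * Request-to-channel map registers: request lines 0-63 start at offset
 * 0x0100, lines 64 and above at 0x1100.
 */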
#define DRCMR(n)	((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)		/* Stop (read / write) */

#define DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define DCMD_ENDIAN	BIT(18)	/* Device Endian-ness. */
#define DCMD_BURST8	(1 << 16)	/* 8 byte burst */
#define DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define DCMD_WIDTH1	(1 << 14)	/* 1 byte width */
#define DCMD_WIDTH2	(2 << 14)	/* 2 byte width (HalfWord) */
#define DCMD_WIDTH4	(3 << 14)	/* 4 byte width (Word) */
#define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_MAX_DESC_BYTES	DCMD_LENGTH

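/*
 * In-memory descriptor fetched by the controller. The next-descriptor
 * address must be 16-byte aligned (see DDADR_DESCADDR); the structure is
 * aligned to its full 32-byte (8-word) LPAE size.
 */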
struct mmp_pdma_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
	/*
	 * The following 32-bit words are only used in the 64-bit, i.e.
	 * LPAE (Long Physical Address Extension) mode.
	 * They are used to specify the high 32 bits of the descriptor's
	 * addresses.
	 */
	u32 ddadrh;	/* High 32-bit of DDADR */
	u32 dsadrh;	/* High 32-bit of DSADR */
	u32 dtadrh;	/* High 32-bit of DTADR */
	u32 rsvd;	/* reserved */
} __aligned(32);

struct mmp_pdma_desc_sw {
	struct mmp_pdma_desc_hw desc;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
};

struct mmp_pdma_phy;

struct mmp_pdma_chan {
	struct device *dev;
	struct dma_chan chan;
	struct dma_async_tx_descriptor desc;
	struct mmp_pdma_phy *phy;
	enum dma_transfer_direction dir;
	struct dma_slave_config slave_config;

	struct mmp_pdma_desc_sw *cyclic_first;	/* first desc_sw if channel
						 * is in cyclic mode */

	/* channel's basic info */
	struct tasklet_struct tasklet;
	u32 dcmd;
	u32 drcmr;
	u32 dev_addr;

	/* list for desc */
	spinlock_t desc_lock;		/* Descriptor list lock */
	struct list_head chain_pending;	/* Link descriptors queue for pending */
	struct list_head chain_running;	/* Link descriptors queue for running */
	bool idle;			/* channel state machine */
	bool byte_align;

	struct dma_pool *desc_pool;	/* Descriptors pool */
};

struct mmp_pdma_phy {
	int idx;
	void __iomem *base;
	struct mmp_pdma_chan *vchan;
};

/**
 * struct mmp_pdma_ops - Operations for the MMP PDMA controller
 *
 * Hardware Register Operations (read/write hardware registers):
 * @write_next_addr: Function to program the address of the next descriptor
 *	into DDADR/DDADRH
 * @read_src_addr: Function to read the source address from DSADR/DSADRH
 * @read_dst_addr: Function to read the destination address from DTADR/DTADRH
 *
 * Descriptor Memory Operations (manipulate descriptor structs in memory):
 * @set_desc_next_addr: Function to set the next descriptor address in a descriptor
 * @set_desc_src_addr: Function to set the source address in a descriptor
 * @set_desc_dst_addr: Function to set the destination address in a descriptor
 * @get_desc_src_addr: Function to get the source address from a descriptor
 * @get_desc_dst_addr: Function to get the destination address from a descriptor
 *
 * Controller Configuration:
 * @run_bits: Control bits in the DCSR register for channel start/stop
 * @dma_mask: DMA addressing capability of the controller. 0 to use OF/platform
 *	settings, or an explicit mask such as DMA_BIT_MASK(32/64)
 */
struct mmp_pdma_ops {
	/* Hardware Register Operations */
	void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr);
	u64 (*read_src_addr)(struct mmp_pdma_phy *phy);
	u64 (*read_dst_addr)(struct mmp_pdma_phy *phy);

	/* Descriptor Memory Operations */
	void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc,
				   dma_addr_t addr);
	void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc,
				  dma_addr_t addr);
	void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc,
				  dma_addr_t addr);
	u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc);
	u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc);

	/* Controller Configuration */
	u32 run_bits;
	u64 dma_mask;
};

struct mmp_pdma_device {
	int dma_channels;
	void __iomem *base;
	struct device *dev;
	struct dma_device device;
	struct mmp_pdma_phy *phy;
	const struct mmp_pdma_ops *ops;
	spinlock_t phy_lock; /* protect alloc/free phy channels */
};

#define tx_to_mmp_pdma_desc(tx)	\
	container_of(tx, struct mmp_pdma_desc_sw, async_tx)
#define to_mmp_pdma_desc(lh)	\
	container_of(lh, struct mmp_pdma_desc_sw, node)
#define to_mmp_pdma_chan(dchan)	\
	container_of(dchan, struct mmp_pdma_chan, chan)
#define to_mmp_pdma_dev(dmadev)	\
	container_of(dmadev, struct mmp_pdma_device, device)

/* For 32-bit PDMA */
static void write_next_addr_32(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	writel(addr, phy->base + DDADR(phy->idx));
}

static u64 read_src_addr_32(struct mmp_pdma_phy *phy)
{
	return readl(phy->base + DSADR(phy->idx));
}

static u64 read_dst_addr_32(struct mmp_pdma_phy *phy)
{
	return readl(phy->base + DTADR(phy->idx));
}

static void set_desc_next_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
{
	desc->ddadr = addr;
}

static void set_desc_src_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
{
	desc->dsadr = addr;
}

static void set_desc_dst_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
{
	desc->dtadr = addr;
}

static u64 get_desc_src_addr_32(const struct mmp_pdma_desc_hw *desc)
{
	return desc->dsadr;
}

static u64 get_desc_dst_addr_32(const struct mmp_pdma_desc_hw *desc)
{
	return desc->dtadr;
}

/* For 64-bit PDMA */
static void write_next_addr_64(struct mmp_pdma_phy *phy, dma_addr_t addr)
{
	writel(lower_32_bits(addr), phy->base + DDADR(phy->idx));
	writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx));
}

static u64 read_src_addr_64(struct mmp_pdma_phy *phy)
{
	u32 low = readl(phy->base + DSADR(phy->idx));
	u32 high = readl(phy->base + DSADRH(phy->idx));

	return ((u64)high << 32) | low;
}

static u64 read_dst_addr_64(struct mmp_pdma_phy *phy)
{
	u32 low = readl(phy->base + DTADR(phy->idx));
	u32 high = readl(phy->base + DTADRH(phy->idx));

	return ((u64)high << 32) | low;
}

static void set_desc_next_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
{
	desc->ddadr = lower_32_bits(addr);
	desc->ddadrh = upper_32_bits(addr);
}

static void set_desc_src_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
{
	desc->dsadr = lower_32_bits(addr);
	desc->dsadrh = upper_32_bits(addr);
}

static void set_desc_dst_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
{
	desc->dtadr = lower_32_bits(addr);
	desc->dtadrh = upper_32_bits(addr);
}

static u64 get_desc_src_addr_64(const struct mmp_pdma_desc_hw *desc)
{
	return ((u64)desc->dsadrh << 32) | desc->dsadr;
}

static u64 get_desc_dst_addr_64(const struct mmp_pdma_desc_hw *desc)
{
	return ((u64)desc->dtadrh << 32) | desc->dtadr;
}

static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction);

static void enable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dalgn;
	struct mmp_pdma_device *pdev;

	if (!phy->vchan)
		return;

	pdev = to_mmp_pdma_dev(phy->vchan->chan.device);

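	/* route the peripheral request line onto this physical channel and mark the mapping valid */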
	reg = DRCMR(phy->vchan->drcmr);
	writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);

	dalgn = readl(phy->base + DALGN);
	if (phy->vchan->byte_align)
		dalgn |= 1 << phy->idx;
	else
		dalgn &= ~(1 << phy->idx);
	writel(dalgn, phy->base + DALGN);

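	/* start the channel: set the run bits (RUN, plus LPAEEN on 64-bit controllers) in its DCSR */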
	reg = (phy->idx << 2) + DCSR;
	writel(readl(phy->base + reg) | pdev->ops->run_bits,
	       phy->base + reg);
}

static void disable_chan(struct mmp_pdma_phy *phy)
{
	u32 reg, dcsr;

	if (!phy)
		return;

	reg = (phy->idx << 2) + DCSR;
	dcsr = readl(phy->base + reg);

	if (phy->vchan) {
		struct mmp_pdma_device *pdev;

		pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
		writel(dcsr & ~pdev->ops->run_bits, phy->base + reg);
	} else {
		/* If no vchan, just clear the RUN bit */
		writel(dcsr & ~DCSR_RUN, phy->base + reg);
	}
}

static int clear_chan_irq(struct mmp_pdma_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);
	u32 reg = (phy->idx << 2) + DCSR;

	if (!(dint & BIT(phy->idx)))
		return -EAGAIN;

	/* clear irq */
	dcsr = readl(phy->base + reg);
	writel(dcsr, phy->base + reg);
	if ((dcsr & DCSR_BUSERR) && (phy->vchan))
		dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");

	return 0;
}

static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
{
	struct mmp_pdma_phy *phy = dev_id;

	if (clear_chan_irq(phy) != 0)
		return IRQ_NONE;

	tasklet_schedule(&phy->vchan->tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
{
	struct mmp_pdma_device *pdev = dev_id;
	struct mmp_pdma_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret;
	int irq_num = 0;

	while (dint) {
		i = __ffs(dint);
		/* only handle interrupts belonging to pdma driver */
		if (i >= pdev->dma_channels)
			break;
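		/* clear the lowest pending bit; __ffs() then finds the next channel */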
		dint &= (dint - 1);
		phy = &pdev->phy[i];
		ret = mmp_pdma_chan_handler(irq, phy);
		if (ret == IRQ_HANDLED)
			irq_num++;
	}

	if (irq_num)
		return IRQ_HANDLED;

	return IRQ_NONE;
}

/* lookup a free phy channel in descending priority order */
static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
{
	int prio, i;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	struct mmp_pdma_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
		for (i = 0; i < pdev->dma_channels; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phy[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	return found;
}

static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
{
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
	unsigned long flags;
	u32 reg;

	if (!pchan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	reg = DRCMR(pchan->drcmr);
	writel(0, pchan->phy->base + reg);

	spin_lock_irqsave(&pdev->phy_lock, flags);
	pchan->phy->vchan = NULL;
	pchan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

/*
 * start_pending_queue - transfer any pending transactions
 * pending list ==> running list
 */
static void start_pending_queue(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);

	/* still running; the irq will start the pending list */
	if (!chan->idle) {
		dev_dbg(chan->dev, "DMA controller still busy\n");
		return;
	}

	if (list_empty(&chan->chain_pending)) {
		/* chance to re-fetch a phy channel with higher priority */
		mmp_pdma_free_phy(chan);
		dev_dbg(chan->dev, "no pending list\n");
		return;
	}

	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(chan->dev, "no free dma channel\n");
			return;
		}
	}

	/*
	 * pending -> running
	 * reinitialize the pending list
	 */
	desc = list_first_entry(&chan->chain_pending,
				struct mmp_pdma_desc_sw, node);
	list_splice_tail_init(&chan->chain_pending, &chan->chain_running);

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys);
	enable_chan(chan->phy);
	chan->idle = false;
}

/* desc->tx_list ==> pending list */
static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
	struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
	struct mmp_pdma_desc_sw *child;
	unsigned long flags;
	dma_cookie_t cookie = -EBUSY;

	spin_lock_irqsave(&chan->desc_lock, flags);

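	/* assign a cookie to every descriptor in the chain; the last one assigned is returned */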
	list_for_each_entry(child, &desc->tx_list, node) {
		cookie = dma_cookie_assign(&child->async_tx);
	}

	/* softly link to pending list - desc->tx_list ==> pending list */
	list_splice_tail_init(&desc->tx_list, &chan->chain_pending);

	spin_unlock_irqrestore(&chan->desc_lock, flags);

	return cookie;
}

static struct mmp_pdma_desc_sw *
mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
{
	struct mmp_pdma_desc_sw *desc;
	dma_addr_t pdesc;

	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
	if (!desc) {
		dev_err(chan->dev, "out of memory for link descriptor\n");
		return NULL;
	}

	INIT_LIST_HEAD(&desc->tx_list);
	dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
	/* each descriptor carries its own tx_submit callback */
	desc->async_tx.tx_submit = mmp_pdma_tx_submit;
	desc->async_tx.phys = pdesc;

	return desc;
}

/*
 * mmp_pdma_alloc_chan_resources - Allocate resources for a DMA channel.
 *
 * This function creates a dma pool for descriptor allocation.
 * The irq is requested only when the channel is requested.
 * Return - The number of allocated descriptors.
 */
static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
					  chan->dev,
					  sizeof(struct mmp_pdma_desc_sw),
					  __alignof__(struct mmp_pdma_desc_sw),
					  0);
	if (!chan->desc_pool) {
		dev_err(chan->dev, "unable to allocate descriptor pool\n");
		return -ENOMEM;
	}

	mmp_pdma_free_phy(chan);
	chan->idle = true;
	chan->dev_addr = 0;
	return 1;
}

static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
				    struct list_head *list)
{
	struct mmp_pdma_desc_sw *desc, *_desc;

	list_for_each_entry_safe(desc, _desc, list, node) {
		list_del(&desc->node);
		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
	}
}

static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;
	chan->idle = true;
	chan->dev_addr = 0;
	mmp_pdma_free_phy(chan);
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_memcpy(struct dma_chan *dchan,
		     dma_addr_t dma_dst, dma_addr_t dma_src,
		     size_t len, unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_device *pdev;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	size_t copy = 0;

	if (!dchan || !len)
		return NULL;

	pdev = to_mmp_pdma_dev(dchan->device);
	chan = to_mmp_pdma_chan(dchan);
	chan->byte_align = false;

	if (!chan->dir) {
		chan->dir = DMA_MEM_TO_MEM;
		chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
		chan->dcmd |= DCMD_BURST32;
	}

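	/* split the copy into chunks of at most PDMA_MAX_DESC_BYTES, one hw descriptor per chunk */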
	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		if (dma_src & 0x7 || dma_dst & 0x7)
			chan->byte_align = true;

		new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
		pdev->ops->set_desc_src_addr(&new->desc, dma_src);
		pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);

		if (!first)
			first = new;
		else
			pdev->ops->set_desc_next_addr(&prev->desc,
						      new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= copy;

		if (chan->dir == DMA_MEM_TO_DEV) {
			dma_src += copy;
		} else if (chan->dir == DMA_DEV_TO_MEM) {
			dma_dst += copy;
		} else if (chan->dir == DMA_MEM_TO_MEM) {
			dma_src += copy;
			dma_dst += copy;
		}

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		       unsigned int sg_len, enum dma_transfer_direction dir,
		       unsigned long flags, void *context)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device);
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t addr;
	int i;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	chan->byte_align = false;

	mmp_pdma_config_write(dchan, &chan->slave_config, dir);

	for_each_sg(sgl, sg, sg_len, i) {
		addr = sg_dma_address(sg);
		avail = sg_dma_len(sg);

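		/* a single sg entry may exceed the hardware limit, so chop it into PDMA_MAX_DESC_BYTES pieces */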
		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (addr & 0x7)
				chan->byte_align = true;

			/* allocate and populate the descriptor */
			new = mmp_pdma_alloc_descriptor(chan);
			if (!new) {
				dev_err(chan->dev, "no memory for desc\n");
				goto fail;
			}

			new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
			if (dir == DMA_MEM_TO_DEV) {
				pdev->ops->set_desc_src_addr(&new->desc, addr);
				new->desc.dtadr = chan->dev_addr;
			} else {
				new->desc.dsadr = chan->dev_addr;
				pdev->ops->set_desc_dst_addr(&new->desc, addr);
			}

			if (!first)
				first = new;
			else
				pdev->ops->set_desc_next_addr(&prev->desc,
							      new->async_tx.phys);

			new->async_tx.cookie = 0;
			async_tx_ack(&new->async_tx);
			prev = new;

			/* Insert the link descriptor into the LD ring */
			list_add_tail(&new->node, &first->tx_list);

			/* update metadata */
			addr += len;
			avail -= len;
		} while (avail);
	}

	first->async_tx.cookie = -EBUSY;
	first->async_tx.flags = flags;

	/* last desc and fire IRQ */
	new->desc.ddadr = DDADR_STOP;
	new->desc.dcmd |= DCMD_ENDIRQEN;

	chan->dir = dir;
	chan->cyclic_first = NULL;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static struct dma_async_tx_descriptor *
mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
			 dma_addr_t buf_addr, size_t len, size_t period_len,
			 enum dma_transfer_direction direction,
			 unsigned long flags)
{
	struct mmp_pdma_chan *chan;
	struct mmp_pdma_device *pdev;
	struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
	dma_addr_t dma_src, dma_dst;

	if (!dchan || !len || !period_len)
		return NULL;

	pdev = to_mmp_pdma_dev(dchan->device);

	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0)
		return NULL;

	if (period_len > PDMA_MAX_DESC_BYTES)
		return NULL;

	chan = to_mmp_pdma_chan(dchan);
	mmp_pdma_config_write(dchan, &chan->slave_config, direction);

	switch (direction) {
	case DMA_MEM_TO_DEV:
		dma_src = buf_addr;
		dma_dst = chan->dev_addr;
		break;
	case DMA_DEV_TO_MEM:
		dma_dst = buf_addr;
		dma_src = chan->dev_addr;
		break;
	default:
		dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
		return NULL;
	}

	chan->dir = direction;

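	/* one descriptor per period, each with ENDIRQEN so every period completion raises an interrupt */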
	do {
		/* Allocate the link descriptor from DMA pool */
		new = mmp_pdma_alloc_descriptor(chan);
		if (!new) {
			dev_err(chan->dev, "no memory for desc\n");
			goto fail;
		}

		new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
				  (DCMD_LENGTH & period_len));
		pdev->ops->set_desc_src_addr(&new->desc, dma_src);
		pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);

		if (!first)
			first = new;
		else
			pdev->ops->set_desc_next_addr(&prev->desc,
						      new->async_tx.phys);

		new->async_tx.cookie = 0;
		async_tx_ack(&new->async_tx);

		prev = new;
		len -= period_len;

		if (chan->dir == DMA_MEM_TO_DEV)
			dma_src += period_len;
		else
			dma_dst += period_len;

		/* Insert the link descriptor into the LD ring */
		list_add_tail(&new->node, &first->tx_list);
	} while (len);

	first->async_tx.flags = flags; /* client is in control of this ack */
	first->async_tx.cookie = -EBUSY;

	/* make the cyclic link */
	pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys);
	chan->cyclic_first = first;

	return &first->async_tx;

fail:
	if (first)
		mmp_pdma_free_desc_list(chan, &first->tx_list);
	return NULL;
}

static int mmp_pdma_config_write(struct dma_chan *dchan,
				 struct dma_slave_config *cfg,
				 enum dma_transfer_direction direction)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	u32 maxburst = 0, addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;

	if (!dchan)
		return -EINVAL;

	if (direction == DMA_DEV_TO_MEM) {
		chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
		maxburst = cfg->src_maxburst;
		width = cfg->src_addr_width;
		addr = cfg->src_addr;
	} else if (direction == DMA_MEM_TO_DEV) {
		chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
		maxburst = cfg->dst_maxburst;
		width = cfg->dst_addr_width;
		addr = cfg->dst_addr;
	}

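	/* translate the slave config bus width and burst length into DCMD bit settings */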
	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		chan->dcmd |= DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		chan->dcmd |= DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		chan->dcmd |= DCMD_WIDTH4;

	if (maxburst == 8)
		chan->dcmd |= DCMD_BURST8;
	else if (maxburst == 16)
		chan->dcmd |= DCMD_BURST16;
	else if (maxburst == 32)
		chan->dcmd |= DCMD_BURST32;

	chan->dir = direction;
	chan->dev_addr = addr;

	return 0;
}

static int mmp_pdma_config(struct dma_chan *dchan,
			   struct dma_slave_config *cfg)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);

	memcpy(&chan->slave_config, cfg, sizeof(*cfg));
	return 0;
}

static int mmp_pdma_terminate_all(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	if (!dchan)
		return -EINVAL;

	disable_chan(chan->phy);
	mmp_pdma_free_phy(chan);
	spin_lock_irqsave(&chan->desc_lock, flags);
	mmp_pdma_free_desc_list(chan, &chan->chain_pending);
	mmp_pdma_free_desc_list(chan, &chan->chain_running);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
	chan->idle = true;

	return 0;
}

static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
				     dma_cookie_t cookie)
{
	struct mmp_pdma_desc_sw *sw;
	struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
	u64 curr;
	u32 residue = 0;
	bool passed = false;
	bool cyclic = chan->cyclic_first != NULL;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	if (chan->dir == DMA_DEV_TO_MEM)
		curr = pdev->ops->read_dst_addr(chan->phy);
	else
		curr = pdev->ops->read_src_addr(chan->phy);

	list_for_each_entry(sw, &chan->chain_running, node) {
		u64 start, end;
		u32 len;

		if (chan->dir == DMA_DEV_TO_MEM)
			start = pdev->ops->get_desc_dst_addr(&sw->desc);
		else
			start = pdev->ops->get_desc_src_addr(&sw->desc);

		len = sw->desc.dcmd & DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor which
		 * lies inside the boundaries of the curr pointer. All
		 * descriptors that occur in the list _after_ we found that
		 * partially handled descriptor are still to be processed and
		 * are hence added to the residual bytes counter.
		 */
		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += (u32)(end - curr);
			passed = true;
		}

		/*
		 * Descriptors that have the ENDIRQEN bit set mark the end of a
		 * transaction chain, and the cookie assigned with it has been
		 * returned previously from mmp_pdma_tx_submit().
		 *
		 * In case we have multiple transactions in the running chain,
		 * and the cookie does not match the one the user asked us
		 * about, reset the state variables and start over.
		 *
		 * This logic does not apply to cyclic transactions, where all
		 * descriptors have the ENDIRQEN bit set, and for which we
		 * can't have multiple transactions on one channel anyway.
		 */
		if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
			continue;

		if (sw->async_tx.cookie == cookie) {
			return residue;
		} else {
			residue = 0;
			passed = false;
		}
	}

	/* We should only get here in case of cyclic transactions */
	return residue;
}

static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
					  dma_cookie_t cookie,
					  struct dma_tx_state *txstate)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(ret != DMA_ERROR))
		dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));

	return ret;
}

/*
 * mmp_pdma_issue_pending - Issue the DMA start command
 * pending list ==> running list
 */
static void mmp_pdma_issue_pending(struct dma_chan *dchan)
{
	struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
	unsigned long flags;

	spin_lock_irqsave(&chan->desc_lock, flags);
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);
}

/*
 * dma_do_tasklet
 * Do call back
 * Start pending list
 */
static void dma_do_tasklet(struct tasklet_struct *t)
{
	struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
	struct mmp_pdma_desc_sw *desc, *_desc;
	LIST_HEAD(chain_cleanup);
	unsigned long flags;
	struct dmaengine_desc_callback cb;

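	/* a cyclic transfer never completes: invoke the period callback and leave the chain in place */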
	if (chan->cyclic_first) {
		spin_lock_irqsave(&chan->desc_lock, flags);
		desc = chan->cyclic_first;
		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		spin_unlock_irqrestore(&chan->desc_lock, flags);

		dmaengine_desc_callback_invoke(&cb, NULL);

		return;
	}

	/* submit pending list; callback for each desc; free desc */
	spin_lock_irqsave(&chan->desc_lock, flags);

	list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
		/*
		 * move the descriptors to a temporary list so we can drop
		 * the lock during the entire cleanup operation
		 */
		list_move(&desc->node, &chain_cleanup);

		/*
		 * Look for the first list entry which has the ENDIRQEN flag
		 * set. That is the descriptor we got an interrupt for, so
		 * complete that transaction and its cookie.
		 */
		if (desc->desc.dcmd & DCMD_ENDIRQEN) {
			dma_cookie_t cookie = desc->async_tx.cookie;

			dma_cookie_complete(&desc->async_tx);
			dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
			break;
		}
	}

	/*
	 * The hardware is idle and ready for more when the
	 * chain_running list is empty.
	 */
	chan->idle = list_empty(&chan->chain_running);

	/* Start any pending transactions automatically */
	start_pending_queue(chan);
	spin_unlock_irqrestore(&chan->desc_lock, flags);

	/* Run the callback for each descriptor, in order */
	list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
		struct dma_async_tx_descriptor *txd = &desc->async_tx;

		/* Remove from the list of transactions */
		list_del(&desc->node);
		/* Run the link descriptor callback function */
		dmaengine_desc_get_callback(txd, &cb);
		dmaengine_desc_callback_invoke(&cb, NULL);

		dma_pool_free(chan->desc_pool, desc, txd->phys);
	}
}

static void mmp_pdma_remove(struct platform_device *op)
{
	struct mmp_pdma_device *pdev = platform_get_drvdata(op);
	struct mmp_pdma_phy *phy;
	int i, irq = 0, irq_num = 0;

	if (op->dev.of_node)
		of_dma_controller_free(op->dev.of_node);

	for (i = 0; i < pdev->dma_channels; i++) {
		if (platform_get_irq(op, i) > 0)
			irq_num++;
	}

	if (irq_num != pdev->dma_channels) {
		irq = platform_get_irq(op, 0);
		devm_free_irq(&op->dev, irq, pdev);
	} else {
		for (i = 0; i < pdev->dma_channels; i++) {
			phy = &pdev->phy[i];
			irq = platform_get_irq(op, i);
			devm_free_irq(&op->dev, irq, phy);
		}
	}

	dma_async_device_unregister(&pdev->device);
}

static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
{
	struct mmp_pdma_phy *phy = &pdev->phy[idx];
	struct mmp_pdma_chan *chan;
	int ret;

	chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
	if (chan == NULL)
		return -ENOMEM;

	phy->idx = idx;
	phy->base = pdev->base;

	if (irq) {
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
				       IRQF_SHARED, "pdma", phy);
		if (ret) {
			dev_err(pdev->dev, "channel request irq fail!\n");
			return ret;
		}
	}

	spin_lock_init(&chan->desc_lock);
	chan->dev = pdev->dev;
	chan->chan.device = &pdev->device;
	tasklet_setup(&chan->tasklet, dma_do_tasklet);
	INIT_LIST_HEAD(&chan->chain_pending);
	INIT_LIST_HEAD(&chan->chain_running);

	/* register the virt channel with the dma engine */
	list_add_tail(&chan->chan.device_node, &pdev->device.channels);

	return 0;
}

static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
	.write_next_addr = write_next_addr_32,
	.read_src_addr = read_src_addr_32,
	.read_dst_addr = read_dst_addr_32,
	.set_desc_next_addr = set_desc_next_addr_32,
	.set_desc_src_addr = set_desc_src_addr_32,
	.set_desc_dst_addr = set_desc_dst_addr_32,
	.get_desc_src_addr = get_desc_src_addr_32,
	.get_desc_dst_addr = get_desc_dst_addr_32,
	.run_bits = (DCSR_RUN),
	.dma_mask = 0, /* let OF/platform set the DMA mask */
};

static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
	.write_next_addr = write_next_addr_64,
	.read_src_addr = read_src_addr_64,
	.read_dst_addr = read_dst_addr_64,
	.set_desc_next_addr = set_desc_next_addr_64,
	.set_desc_src_addr = set_desc_src_addr_64,
	.set_desc_dst_addr = set_desc_dst_addr_64,
	.get_desc_src_addr = get_desc_src_addr_64,
	.get_desc_dst_addr = get_desc_dst_addr_64,
	.run_bits = (DCSR_RUN | DCSR_LPAEEN),
	.dma_mask = DMA_BIT_MASK(64), /* force 64-bit DMA addr capability */
};

static const struct of_device_id mmp_pdma_dt_ids[] = {
	{
		.compatible = "marvell,pdma-1.0",
		.data = &marvell_pdma_v1_ops
	}, {
		.compatible = "spacemit,k1-pdma",
		.data = &spacemit_k1_pdma_ops
	}, {
		/* sentinel */
	}
};
MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);

static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct mmp_pdma_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->device);
	if (!chan)
		return NULL;

	to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];

	return chan;
}

static int mmp_pdma_probe(struct platform_device *op)
{
	struct mmp_pdma_device *pdev;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct clk *clk;
	struct reset_control *rst;
	int i, ret, irq = 0;
	int dma_channels = 0, irq_num = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	pdev->dev = &op->dev;

	spin_lock_init(&pdev->phy_lock);

	pdev->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	clk = devm_clk_get_optional_enabled(pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev,
								   NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	pdev->ops = of_device_get_match_data(&op->dev);
	if (!pdev->ops)
		return -ENODEV;

	if (pdev->dev->of_node) {
		/* Parse the new and deprecated dma-channels properties */
		if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
					 &dma_channels))
			of_property_read_u32(pdev->dev->of_node, "#dma-channels",
					     &dma_channels);
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
	} else {
		dma_channels = 32;	/* default to 32 channels */
	}
	pdev->dma_channels = dma_channels;

	for (i = 0; i < dma_channels; i++) {
		if (platform_get_irq_optional(op, i) > 0)
			irq_num++;
	}

	pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
				 GFP_KERNEL);
	if (pdev->phy == NULL)
		return -ENOMEM;

	INIT_LIST_HEAD(&pdev->device.channels);

	if (irq_num != dma_channels) {
		/* all channels share one irq, demux inside */
		irq = platform_get_irq(op, 0);
		ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
				       IRQF_SHARED, "pdma", pdev);
		if (ret)
			return ret;
	}

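	/* with a shared irq the channels are initialized with irq == 0; otherwise each gets its own line */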
	for (i = 0; i < dma_channels; i++) {
		irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
		ret = mmp_pdma_chan_init(pdev, i, irq);
		if (ret)
			return ret;
	}

	dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
	pdev->device.dev = &op->dev;
	pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
	pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
	pdev->device.device_tx_status = mmp_pdma_tx_status;
	pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
	pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
	pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
	pdev->device.device_issue_pending = mmp_pdma_issue_pending;
	pdev->device.device_config = mmp_pdma_config;
	pdev->device.device_terminate_all = mmp_pdma_terminate_all;
	pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
	pdev->device.src_addr_widths = widths;
	pdev->device.dst_addr_widths = widths;
	pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Set DMA mask based on ops->dma_mask, or OF/platform */
	if (pdev->ops->dma_mask)
		dma_set_mask(pdev->dev, pdev->ops->dma_mask);
	else if (pdev->dev->coherent_dma_mask)
		dma_set_mask(pdev->dev, pdev->dev->coherent_dma_mask);
	else
		dma_set_mask(pdev->dev, DMA_BIT_MASK(64));

	ret = dma_async_device_register(&pdev->device);
	if (ret) {
		dev_err(pdev->device.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 mmp_pdma_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(&op->dev, "of_dma_controller_register failed\n");
			dma_async_device_unregister(&pdev->device);
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id mmp_pdma_id_table[] = {
	{ "mmp-pdma", },
	{ },
};

static struct platform_driver mmp_pdma_driver = {
	.driver = {
		.name = "mmp-pdma",
		.of_match_table = mmp_pdma_dt_ids,
	},
	.id_table = mmp_pdma_id_table,
	.probe = mmp_pdma_probe,
	.remove = mmp_pdma_remove,
};

module_platform_driver(mmp_pdma_driver);

MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
MODULE_AUTHOR("Marvell International Ltd.");
MODULE_LICENSE("GPL v2");