1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright 2012 Marvell International Ltd.
4 */
5
6 #include <linux/err.h>
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/types.h>
10 #include <linux/interrupt.h>
11 #include <linux/dma-mapping.h>
12 #include <linux/slab.h>
13 #include <linux/dmaengine.h>
14 #include <linux/platform_device.h>
15 #include <linux/device.h>
16 #include <linux/platform_data/mmp_dma.h>
17 #include <linux/dmapool.h>
18 #include <linux/clk.h>
19 #include <linux/reset.h>
20 #include <linux/of_dma.h>
21 #include <linux/of.h>
22
23 #include "dmaengine.h"
24
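/*
 * Register map: DALGN and DINT are global registers with one bit per
 * channel, DCSR is per channel (offset DCSR + (channel << 2)), and
 * DDADR/DSADR/DTADR plus their *H counterparts hold the per-channel
 * descriptor and address state.
 */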
25 #define DCSR 0x0000
26 #define DALGN 0x00a0
27 #define DINT 0x00f0
28 #define DDADR(n) (0x0200 + ((n) << 4))
29 #define DSADR(n) (0x0204 + ((n) << 4))
30 #define DTADR(n) (0x0208 + ((n) << 4))
31 #define DDADRH(n) (0x0300 + ((n) << 4))
32 #define DSADRH(n) (0x0304 + ((n) << 4))
33 #define DTADRH(n) (0x0308 + ((n) << 4))
34 #define DCMD 0x020c
35
36 #define DCSR_RUN BIT(31) /* Run Bit (read / write) */
37 #define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */
38 #define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */
39 #define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */
40 #define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */
41 #define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */
42 #define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */
43 #define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */
44
45 #define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */
46 #define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */
47 #define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */
48 #define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */
49 #define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */
50 #define DCSR_LPAEEN BIT(21) /* Long Physical Address Extension Enable */
51 #define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */
52 #define DCSR_EORINTR BIT(9) /* The end of Receive */
53
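/*
 * DRCMR: one request-to-channel mapping register per peripheral request
 * line; lines 0-63 start at offset 0x0100, lines 64 and above at 0x1100.
 */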
54 #define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2))
55 #define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */
56 #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */
57
58 #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */
59 #define DDADR_STOP BIT(0) /* Stop (read / write) */
60
61 #define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */
62 #define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */
63 #define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */
64 #define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */
65 #define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */
66 #define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */
67 #define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */
68 #define DCMD_BURST8 (1 << 16) /* 8 byte burst */
69 #define DCMD_BURST16 (2 << 16) /* 16 byte burst */
70 #define DCMD_BURST32 (3 << 16) /* 32 byte burst */
71 #define DCMD_WIDTH1 (1 << 14) /* 1 byte width */
72 #define DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */
73 #define DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */
74 #define DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */
75
76 #define PDMA_MAX_DESC_BYTES DCMD_LENGTH
77
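/*
 * Hardware descriptor. DDADR only stores bits [31:4] of the next-descriptor
 * address, so descriptors must be at least 16-byte aligned; the structure is
 * padded to 8 words for the LPAE variant and aligned to its full 32-byte size.
 */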
78 struct mmp_pdma_desc_hw {
79 u32 ddadr; /* Points to the next descriptor + flags */
80 u32 dsadr; /* DSADR value for the current transfer */
81 u32 dtadr; /* DTADR value for the current transfer */
82 u32 dcmd; /* DCMD value for the current transfer */
83 /*
84 * The following 32-bit words are only used in the 64-bit, i.e.,
85 * LPAE (Long Physical Address Extension) mode.
86 * They are used to specify the high 32 bits of the descriptor's
87 * addresses.
88 */
89 u32 ddadrh; /* High 32-bit of DDADR */
90 u32 dsadrh; /* High 32-bit of DSADR */
91 u32 dtadrh; /* High 32-bit of DTADR */
92 u32 rsvd; /* reserved */
93 } __aligned(32);
94
95 struct mmp_pdma_desc_sw {
96 struct mmp_pdma_desc_hw desc;
97 struct list_head node;
98 struct list_head tx_list;
99 struct dma_async_tx_descriptor async_tx;
100 };
101
102 struct mmp_pdma_phy;
103
104 struct mmp_pdma_chan {
105 struct device *dev;
106 struct dma_chan chan;
107 struct dma_async_tx_descriptor desc;
108 struct mmp_pdma_phy *phy;
109 enum dma_transfer_direction dir;
110 struct dma_slave_config slave_config;
111
112 struct mmp_pdma_desc_sw *cyclic_first; /* first desc_sw if channel
113 * is in cyclic mode */
114
115 /* channel's basic info */
116 struct tasklet_struct tasklet;
117 u32 dcmd;
118 u32 drcmr;
119 u32 dev_addr;
120
121 /* list for desc */
122 spinlock_t desc_lock; /* Descriptor list lock */
123 struct list_head chain_pending; /* Link descriptors queue for pending */
124 struct list_head chain_running; /* Link descriptors queue for running */
125 bool idle; /* channel state machine */
126 bool byte_align;
127
128 struct dma_pool *desc_pool; /* Descriptors pool */
129 };
130
131 struct mmp_pdma_phy {
132 int idx;
133 void __iomem *base;
134 struct mmp_pdma_chan *vchan;
135 };
136
137 /**
138 * struct mmp_pdma_ops - Operations for the MMP PDMA controller
139 *
140 * Hardware Register Operations (read/write hardware registers):
141 * @write_next_addr: Function to program address of next descriptor into
142 * DDADR/DDADRH
143 * @read_src_addr: Function to read the source address from DSADR/DSADRH
144 * @read_dst_addr: Function to read the destination address from DTADR/DTADRH
145 *
146 * Descriptor Memory Operations (manipulate descriptor structs in memory):
147 * @set_desc_next_addr: Function to set next descriptor address in descriptor
148 * @set_desc_src_addr: Function to set the source address in descriptor
149 * @set_desc_dst_addr: Function to set the destination address in descriptor
150 * @get_desc_src_addr: Function to get the source address from descriptor
151 * @get_desc_dst_addr: Function to get the destination address from descriptor
152 *
153 * Controller Configuration:
154 * @run_bits: Control bits in DCSR register for channel start/stop
155 * @dma_width: DMA addressing width in bits (32 or 64). Determines the
156 * DMA mask capability of the controller hardware.
157 */
158 struct mmp_pdma_ops {
159 /* Hardware Register Operations */
160 void (*write_next_addr)(struct mmp_pdma_phy *phy, dma_addr_t addr);
161 u64 (*read_src_addr)(struct mmp_pdma_phy *phy);
162 u64 (*read_dst_addr)(struct mmp_pdma_phy *phy);
163
164 /* Descriptor Memory Operations */
165 void (*set_desc_next_addr)(struct mmp_pdma_desc_hw *desc,
166 dma_addr_t addr);
167 void (*set_desc_src_addr)(struct mmp_pdma_desc_hw *desc,
168 dma_addr_t addr);
169 void (*set_desc_dst_addr)(struct mmp_pdma_desc_hw *desc,
170 dma_addr_t addr);
171 u64 (*get_desc_src_addr)(const struct mmp_pdma_desc_hw *desc);
172 u64 (*get_desc_dst_addr)(const struct mmp_pdma_desc_hw *desc);
173
174 /* Controller Configuration */
175 u32 run_bits;
176 u32 dma_width;
177 };
178
179 struct mmp_pdma_device {
180 int dma_channels;
181 void __iomem *base;
182 struct device *dev;
183 struct dma_device device;
184 struct mmp_pdma_phy *phy;
185 const struct mmp_pdma_ops *ops;
186 spinlock_t phy_lock; /* protect alloc/free phy channels */
187 };
188
189 #define tx_to_mmp_pdma_desc(tx) \
190 container_of(tx, struct mmp_pdma_desc_sw, async_tx)
191 #define to_mmp_pdma_desc(lh) \
192 container_of(lh, struct mmp_pdma_desc_sw, node)
193 #define to_mmp_pdma_chan(dchan) \
194 container_of(dchan, struct mmp_pdma_chan, chan)
195 #define to_mmp_pdma_dev(dmadev) \
196 container_of(dmadev, struct mmp_pdma_device, device)
197
198 /* For 32-bit PDMA */
199 static void write_next_addr_32(struct mmp_pdma_phy *phy, dma_addr_t addr)
200 {
201 writel(addr, phy->base + DDADR(phy->idx));
202 }
203
204 static u64 read_src_addr_32(struct mmp_pdma_phy *phy)
205 {
206 return readl(phy->base + DSADR(phy->idx));
207 }
208
209 static u64 read_dst_addr_32(struct mmp_pdma_phy *phy)
210 {
211 return readl(phy->base + DTADR(phy->idx));
212 }
213
214 static void set_desc_next_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
215 {
216 desc->ddadr = addr;
217 }
218
219 static void set_desc_src_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
220 {
221 desc->dsadr = addr;
222 }
223
224 static void set_desc_dst_addr_32(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
225 {
226 desc->dtadr = addr;
227 }
228
229 static u64 get_desc_src_addr_32(const struct mmp_pdma_desc_hw *desc)
230 {
231 return desc->dsadr;
232 }
233
234 static u64 get_desc_dst_addr_32(const struct mmp_pdma_desc_hw *desc)
235 {
236 return desc->dtadr;
237 }
238
239 /* For 64-bit PDMA */
240 static void write_next_addr_64(struct mmp_pdma_phy *phy, dma_addr_t addr)
241 {
242 writel(lower_32_bits(addr), phy->base + DDADR(phy->idx));
243 writel(upper_32_bits(addr), phy->base + DDADRH(phy->idx));
244 }
245
246 static u64 read_src_addr_64(struct mmp_pdma_phy *phy)
247 {
248 u32 low = readl(phy->base + DSADR(phy->idx));
249 u32 high = readl(phy->base + DSADRH(phy->idx));
250
251 return ((u64)high << 32) | low;
252 }
253
254 static u64 read_dst_addr_64(struct mmp_pdma_phy *phy)
255 {
256 u32 low = readl(phy->base + DTADR(phy->idx));
257 u32 high = readl(phy->base + DTADRH(phy->idx));
258
259 return ((u64)high << 32) | low;
260 }
261
262 static void set_desc_next_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
263 {
264 desc->ddadr = lower_32_bits(addr);
265 desc->ddadrh = upper_32_bits(addr);
266 }
267
268 static void set_desc_src_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
269 {
270 desc->dsadr = lower_32_bits(addr);
271 desc->dsadrh = upper_32_bits(addr);
272 }
273
274 static void set_desc_dst_addr_64(struct mmp_pdma_desc_hw *desc, dma_addr_t addr)
275 {
276 desc->dtadr = lower_32_bits(addr);
277 desc->dtadrh = upper_32_bits(addr);
278 }
279
280 static u64 get_desc_src_addr_64(const struct mmp_pdma_desc_hw *desc)
281 {
282 return ((u64)desc->dsadrh << 32) | desc->dsadr;
283 }
284
285 static u64 get_desc_dst_addr_64(const struct mmp_pdma_desc_hw *desc)
286 {
287 return ((u64)desc->dtadrh << 32) | desc->dtadr;
288 }
289
290 static int mmp_pdma_config_write(struct dma_chan *dchan,
291 struct dma_slave_config *cfg,
292 enum dma_transfer_direction direction);
293
294 static void enable_chan(struct mmp_pdma_phy *phy)
295 {
296 u32 reg, dalgn;
297 struct mmp_pdma_device *pdev;
298
299 if (!phy->vchan)
300 return;
301
302 pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
303
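/* map the channel's peripheral request line onto this physical channel */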
304 reg = DRCMR(phy->vchan->drcmr);
305 writel(DRCMR_MAPVLD | phy->idx, phy->base + reg);
306
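/* tell the controller whether this channel carries byte-aligned buffers */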
307 dalgn = readl(phy->base + DALGN);
308 if (phy->vchan->byte_align)
309 dalgn |= 1 << phy->idx;
310 else
311 dalgn &= ~(1 << phy->idx);
312 writel(dalgn, phy->base + DALGN);
313
314 reg = (phy->idx << 2) + DCSR;
315 writel(readl(phy->base + reg) | pdev->ops->run_bits,
316 phy->base + reg);
317 }
318
319 static void disable_chan(struct mmp_pdma_phy *phy)
320 {
321 u32 reg, dcsr;
322
323 if (!phy)
324 return;
325
326 reg = (phy->idx << 2) + DCSR;
327 dcsr = readl(phy->base + reg);
328
329 if (phy->vchan) {
330 struct mmp_pdma_device *pdev;
331
332 pdev = to_mmp_pdma_dev(phy->vchan->chan.device);
333 writel(dcsr & ~pdev->ops->run_bits, phy->base + reg);
334 } else {
335 /* If no vchan, just clear the RUN bit */
336 writel(dcsr & ~DCSR_RUN, phy->base + reg);
337 }
338 }
339
340 static int clear_chan_irq(struct mmp_pdma_phy *phy)
341 {
342 u32 dcsr;
343 u32 dint = readl(phy->base + DINT);
344 u32 reg = (phy->idx << 2) + DCSR;
345
346 if (!(dint & BIT(phy->idx)))
347 return -EAGAIN;
348
349 /* ack the interrupt: DCSR status bits are cleared by writing them back */
350 dcsr = readl(phy->base + reg);
351 writel(dcsr, phy->base + reg);
352 if ((dcsr & DCSR_BUSERR) && (phy->vchan))
353 dev_warn(phy->vchan->dev, "DCSR_BUSERR\n");
354
355 return 0;
356 }
357
358 static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id)
359 {
360 struct mmp_pdma_phy *phy = dev_id;
361
362 if (clear_chan_irq(phy) != 0)
363 return IRQ_NONE;
364
365 tasklet_schedule(&phy->vchan->tasklet);
366 return IRQ_HANDLED;
367 }
368
369 static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id)
370 {
371 struct mmp_pdma_device *pdev = dev_id;
372 struct mmp_pdma_phy *phy;
373 u32 dint = readl(pdev->base + DINT);
374 int i, ret;
375 int irq_num = 0;
376
377 while (dint) {
378 i = __ffs(dint);
379 /* only handle interrupts belonging to the pdma driver */
380 if (i >= pdev->dma_channels)
381 break;
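/* clear the lowest set bit, i.e. the interrupt we are about to handle */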
382 dint &= (dint - 1);
383 phy = &pdev->phy[i];
384 ret = mmp_pdma_chan_handler(irq, phy);
385 if (ret == IRQ_HANDLED)
386 irq_num++;
387 }
388
389 if (irq_num)
390 return IRQ_HANDLED;
391
392 return IRQ_NONE;
393 }
394
395 /* lookup free phy channel as descending priority */
396 static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan)
397 {
398 int prio, i;
399 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
400 struct mmp_pdma_phy *phy, *found = NULL;
401 unsigned long flags;
402
403 /*
404 * dma channel priorities
405 * ch 0 - 3, 16 - 19 <--> (0)
406 * ch 4 - 7, 20 - 23 <--> (1)
407 * ch 8 - 11, 24 - 27 <--> (2)
408 * ch 12 - 15, 28 - 31 <--> (3)
409 */
410
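/* scan priority groups from highest (0) downwards and take the first free phy */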
411 spin_lock_irqsave(&pdev->phy_lock, flags);
412 for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) {
413 for (i = 0; i < pdev->dma_channels; i++) {
414 if (prio != (i & 0xf) >> 2)
415 continue;
416 phy = &pdev->phy[i];
417 if (!phy->vchan) {
418 phy->vchan = pchan;
419 found = phy;
420 goto out_unlock;
421 }
422 }
423 }
424
425 out_unlock:
426 spin_unlock_irqrestore(&pdev->phy_lock, flags);
427 return found;
428 }
429
430 static void mmp_pdma_free_phy(struct mmp_pdma_chan *pchan)
431 {
432 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(pchan->chan.device);
433 unsigned long flags;
434 u32 reg;
435
436 if (!pchan->phy)
437 return;
438
439 /* clear the channel mapping in DRCMR */
440 reg = DRCMR(pchan->drcmr);
441 writel(0, pchan->phy->base + reg);
442
443 spin_lock_irqsave(&pdev->phy_lock, flags);
444 pchan->phy->vchan = NULL;
445 pchan->phy = NULL;
446 spin_unlock_irqrestore(&pdev->phy_lock, flags);
447 }
448
449 /*
450 * start_pending_queue - transfer any pending transactions
451 * pending list ==> running list
452 */
453 static void start_pending_queue(struct mmp_pdma_chan *chan)
454 {
455 struct mmp_pdma_desc_sw *desc;
456 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
457
458 /* still running; the irq handler will start the pending list */
459 if (!chan->idle) {
460 dev_dbg(chan->dev, "DMA controller still busy\n");
461 return;
462 }
463
464 if (list_empty(&chan->chain_pending)) {
465 /* chance to re-fetch phy channel with higher prio */
466 mmp_pdma_free_phy(chan);
467 dev_dbg(chan->dev, "no pending list\n");
468 return;
469 }
470
471 if (!chan->phy) {
472 chan->phy = lookup_phy(chan);
473 if (!chan->phy) {
474 dev_dbg(chan->dev, "no free dma channel\n");
475 return;
476 }
477 }
478
479 /*
480 * pending -> running
481 * reinitialize the pending list
482 */
483 desc = list_first_entry(&chan->chain_pending,
484 struct mmp_pdma_desc_sw, node);
485 list_splice_tail_init(&chan->chain_pending, &chan->chain_running);
486
487 /*
488 * Program the descriptor's address into the DMA controller,
489 * then start the DMA transaction
490 */
491 pdev->ops->write_next_addr(chan->phy, desc->async_tx.phys);
492 enable_chan(chan->phy);
493 chan->idle = false;
494 }
495
497 /* desc->tx_list ==> pending list */
498 static dma_cookie_t mmp_pdma_tx_submit(struct dma_async_tx_descriptor *tx)
499 {
500 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(tx->chan);
501 struct mmp_pdma_desc_sw *desc = tx_to_mmp_pdma_desc(tx);
502 struct mmp_pdma_desc_sw *child;
503 unsigned long flags;
504 dma_cookie_t cookie = -EBUSY;
505
506 spin_lock_irqsave(&chan->desc_lock, flags);
507
508 list_for_each_entry(child, &desc->tx_list, node) {
509 cookie = dma_cookie_assign(&child->async_tx);
510 }
511
512 /* softly link to pending list - desc->tx_list ==> pending list */
513 list_splice_tail_init(&desc->tx_list, &chan->chain_pending);
514
515 spin_unlock_irqrestore(&chan->desc_lock, flags);
516
517 return cookie;
518 }
519
520 static struct mmp_pdma_desc_sw *
521 mmp_pdma_alloc_descriptor(struct mmp_pdma_chan *chan)
522 {
523 struct mmp_pdma_desc_sw *desc;
524 dma_addr_t pdesc;
525
526 desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
527 if (!desc) {
528 dev_err(chan->dev, "out of memory for link descriptor\n");
529 return NULL;
530 }
531
532 INIT_LIST_HEAD(&desc->tx_list);
533 dma_async_tx_descriptor_init(&desc->async_tx, &chan->chan);
534 /* each descriptor provides its own tx_submit callback */
535 desc->async_tx.tx_submit = mmp_pdma_tx_submit;
536 desc->async_tx.phys = pdesc;
537
538 return desc;
539 }
540
541 /*
542 * mmp_pdma_alloc_chan_resources - Allocate resources for DMA channel.
543 *
544 * This function will create a dma pool for descriptor allocation.
545 * Request irq only when channel is requested
546 * Return - The number of allocated descriptors.
547 */
548
549 static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan)
550 {
551 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
552
553 if (chan->desc_pool)
554 return 1;
555
556 chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device),
557 chan->dev,
558 sizeof(struct mmp_pdma_desc_sw),
559 __alignof__(struct mmp_pdma_desc_sw),
560 0);
561 if (!chan->desc_pool) {
562 dev_err(chan->dev, "unable to allocate descriptor pool\n");
563 return -ENOMEM;
564 }
565
566 mmp_pdma_free_phy(chan);
567 chan->idle = true;
568 chan->dev_addr = 0;
569 return 1;
570 }
571
572 static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan,
573 struct list_head *list)
574 {
575 struct mmp_pdma_desc_sw *desc, *_desc;
576
577 list_for_each_entry_safe(desc, _desc, list, node) {
578 list_del(&desc->node);
579 dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
580 }
581 }
582
583 static void mmp_pdma_free_chan_resources(struct dma_chan *dchan)
584 {
585 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
586 unsigned long flags;
587
588 spin_lock_irqsave(&chan->desc_lock, flags);
589 mmp_pdma_free_desc_list(chan, &chan->chain_pending);
590 mmp_pdma_free_desc_list(chan, &chan->chain_running);
591 spin_unlock_irqrestore(&chan->desc_lock, flags);
592
593 dma_pool_destroy(chan->desc_pool);
594 chan->desc_pool = NULL;
595 chan->idle = true;
596 chan->dev_addr = 0;
597 mmp_pdma_free_phy(chan);
599 }
600
601 static struct dma_async_tx_descriptor *
602 mmp_pdma_prep_memcpy(struct dma_chan *dchan,
603 dma_addr_t dma_dst, dma_addr_t dma_src,
604 size_t len, unsigned long flags)
605 {
606 struct mmp_pdma_chan *chan;
607 struct mmp_pdma_device *pdev;
608 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
609 size_t copy = 0;
610
611 if (!dchan || !len)
612 return NULL;
613
614 pdev = to_mmp_pdma_dev(dchan->device);
615 chan = to_mmp_pdma_chan(dchan);
616 chan->byte_align = false;
617
618 if (!chan->dir) {
619 chan->dir = DMA_MEM_TO_MEM;
620 chan->dcmd = DCMD_INCTRGADDR | DCMD_INCSRCADDR;
621 chan->dcmd |= DCMD_BURST32;
622 }
623
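/* split the copy into a chain of descriptors of at most PDMA_MAX_DESC_BYTES each */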
624 do {
625 /* Allocate the link descriptor from DMA pool */
626 new = mmp_pdma_alloc_descriptor(chan);
627 if (!new) {
628 dev_err(chan->dev, "no memory for desc\n");
629 goto fail;
630 }
631
632 copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
633 if (dma_src & 0x7 || dma_dst & 0x7)
634 chan->byte_align = true;
635
636 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & copy);
637 pdev->ops->set_desc_src_addr(&new->desc, dma_src);
638 pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
639
640 if (!first)
641 first = new;
642 else
643 pdev->ops->set_desc_next_addr(&prev->desc,
644 new->async_tx.phys);
645
646 new->async_tx.cookie = 0;
647 async_tx_ack(&new->async_tx);
648
649 prev = new;
650 len -= copy;
651
652 if (chan->dir == DMA_MEM_TO_DEV) {
653 dma_src += copy;
654 } else if (chan->dir == DMA_DEV_TO_MEM) {
655 dma_dst += copy;
656 } else if (chan->dir == DMA_MEM_TO_MEM) {
657 dma_src += copy;
658 dma_dst += copy;
659 }
660
661 /* Insert the link descriptor to the LD ring */
662 list_add_tail(&new->node, &first->tx_list);
663 } while (len);
664
665 first->async_tx.flags = flags; /* client is in control of this ack */
666 first->async_tx.cookie = -EBUSY;
667
668 /* last desc and fire IRQ */
669 new->desc.ddadr = DDADR_STOP;
670 new->desc.dcmd |= DCMD_ENDIRQEN;
671
672 chan->cyclic_first = NULL;
673
674 return &first->async_tx;
675
676 fail:
677 if (first)
678 mmp_pdma_free_desc_list(chan, &first->tx_list);
679 return NULL;
680 }
681
682 static struct dma_async_tx_descriptor *
683 mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
684 unsigned int sg_len, enum dma_transfer_direction dir,
685 unsigned long flags, void *context)
686 {
687 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
688 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(dchan->device);
689 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL;
690 size_t len, avail;
691 struct scatterlist *sg;
692 dma_addr_t addr;
693 int i;
694
695 if ((sgl == NULL) || (sg_len == 0))
696 return NULL;
697
698 chan->byte_align = false;
699
700 mmp_pdma_config_write(dchan, &chan->slave_config, dir);
701
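/* walk the scatterlist and split each entry into PDMA_MAX_DESC_BYTES chunks */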
702 for_each_sg(sgl, sg, sg_len, i) {
703 addr = sg_dma_address(sg);
704 avail = sg_dma_len(sg);
705
706 do {
707 len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
708 if (addr & 0x7)
709 chan->byte_align = true;
710
711 /* allocate and populate the descriptor */
712 new = mmp_pdma_alloc_descriptor(chan);
713 if (!new) {
714 dev_err(chan->dev, "no memory for desc\n");
715 goto fail;
716 }
717
718 new->desc.dcmd = chan->dcmd | (DCMD_LENGTH & len);
719 if (dir == DMA_MEM_TO_DEV) {
720 pdev->ops->set_desc_src_addr(&new->desc, addr);
721 new->desc.dtadr = chan->dev_addr;
722 } else {
723 new->desc.dsadr = chan->dev_addr;
724 pdev->ops->set_desc_dst_addr(&new->desc, addr);
725 }
726
727 if (!first)
728 first = new;
729 else
730 pdev->ops->set_desc_next_addr(&prev->desc,
731 new->async_tx.phys);
732
733 new->async_tx.cookie = 0;
734 async_tx_ack(&new->async_tx);
735 prev = new;
736
737 /* Insert the link descriptor to the LD ring */
738 list_add_tail(&new->node, &first->tx_list);
739
740 /* update metadata */
741 addr += len;
742 avail -= len;
743 } while (avail);
744 }
745
746 first->async_tx.cookie = -EBUSY;
747 first->async_tx.flags = flags;
748
749 /* last desc and fire IRQ */
750 new->desc.ddadr = DDADR_STOP;
751 new->desc.dcmd |= DCMD_ENDIRQEN;
752
753 chan->dir = dir;
754 chan->cyclic_first = NULL;
755
756 return &first->async_tx;
757
758 fail:
759 if (first)
760 mmp_pdma_free_desc_list(chan, &first->tx_list);
761 return NULL;
762 }
763
764 static struct dma_async_tx_descriptor *
765 mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan,
766 dma_addr_t buf_addr, size_t len, size_t period_len,
767 enum dma_transfer_direction direction,
768 unsigned long flags)
769 {
770 struct mmp_pdma_chan *chan;
771 struct mmp_pdma_device *pdev;
772 struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new;
773 dma_addr_t dma_src, dma_dst;
774
775 if (!dchan || !len || !period_len)
776 return NULL;
777
778 pdev = to_mmp_pdma_dev(dchan->device);
779
780 /* the buffer length must be a multiple of period_len */
781 if (len % period_len != 0)
782 return NULL;
783
784 if (period_len > PDMA_MAX_DESC_BYTES)
785 return NULL;
786
787 chan = to_mmp_pdma_chan(dchan);
788 mmp_pdma_config_write(dchan, &chan->slave_config, direction);
789
790 switch (direction) {
791 case DMA_MEM_TO_DEV:
792 dma_src = buf_addr;
793 dma_dst = chan->dev_addr;
794 break;
795 case DMA_DEV_TO_MEM:
796 dma_dst = buf_addr;
797 dma_src = chan->dev_addr;
798 break;
799 default:
800 dev_err(chan->dev, "Unsupported direction for cyclic DMA\n");
801 return NULL;
802 }
803
804 chan->dir = direction;
805
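/* one descriptor per period; each one raises an interrupt when it completes */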
806 do {
807 /* Allocate the link descriptor from DMA pool */
808 new = mmp_pdma_alloc_descriptor(chan);
809 if (!new) {
810 dev_err(chan->dev, "no memory for desc\n");
811 goto fail;
812 }
813
814 new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN |
815 (DCMD_LENGTH & period_len));
816 pdev->ops->set_desc_src_addr(&new->desc, dma_src);
817 pdev->ops->set_desc_dst_addr(&new->desc, dma_dst);
818
819 if (!first)
820 first = new;
821 else
822 pdev->ops->set_desc_next_addr(&prev->desc,
823 new->async_tx.phys);
824
825 new->async_tx.cookie = 0;
826 async_tx_ack(&new->async_tx);
827
828 prev = new;
829 len -= period_len;
830
831 if (chan->dir == DMA_MEM_TO_DEV)
832 dma_src += period_len;
833 else
834 dma_dst += period_len;
835
836 /* Insert the link descriptor to the LD ring */
837 list_add_tail(&new->node, &first->tx_list);
838 } while (len);
839
840 first->async_tx.flags = flags; /* client is in control of this ack */
841 first->async_tx.cookie = -EBUSY;
842
843 /* make the cyclic link */
844 pdev->ops->set_desc_next_addr(&new->desc, first->async_tx.phys);
845 chan->cyclic_first = first;
846
847 return &first->async_tx;
848
849 fail:
850 if (first)
851 mmp_pdma_free_desc_list(chan, &first->tx_list);
852 return NULL;
853 }
854
855 static int mmp_pdma_config_write(struct dma_chan *dchan,
856 struct dma_slave_config *cfg,
857 enum dma_transfer_direction direction)
858 {
859 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
860 u32 maxburst = 0, addr = 0;
861 enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
862
863 if (!dchan)
864 return -EINVAL;
865
866 if (direction == DMA_DEV_TO_MEM) {
867 chan->dcmd = DCMD_INCTRGADDR | DCMD_FLOWSRC;
868 maxburst = cfg->src_maxburst;
869 width = cfg->src_addr_width;
870 addr = cfg->src_addr;
871 } else if (direction == DMA_MEM_TO_DEV) {
872 chan->dcmd = DCMD_INCSRCADDR | DCMD_FLOWTRG;
873 maxburst = cfg->dst_maxburst;
874 width = cfg->dst_addr_width;
875 addr = cfg->dst_addr;
876 }
877
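/* translate the requested bus width and burst size into DCMD bits */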
878 if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
879 chan->dcmd |= DCMD_WIDTH1;
880 else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
881 chan->dcmd |= DCMD_WIDTH2;
882 else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
883 chan->dcmd |= DCMD_WIDTH4;
884
885 if (maxburst == 8)
886 chan->dcmd |= DCMD_BURST8;
887 else if (maxburst == 16)
888 chan->dcmd |= DCMD_BURST16;
889 else if (maxburst == 32)
890 chan->dcmd |= DCMD_BURST32;
891
892 chan->dir = direction;
893 chan->dev_addr = addr;
894
895 return 0;
896 }
897
898 static int mmp_pdma_config(struct dma_chan *dchan,
899 struct dma_slave_config *cfg)
900 {
901 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
902
903 memcpy(&chan->slave_config, cfg, sizeof(*cfg));
904 return 0;
905 }
906
907 static int mmp_pdma_terminate_all(struct dma_chan *dchan)
908 {
909 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
910 unsigned long flags;
911
912 if (!dchan)
913 return -EINVAL;
914
915 disable_chan(chan->phy);
916 mmp_pdma_free_phy(chan);
917 spin_lock_irqsave(&chan->desc_lock, flags);
918 mmp_pdma_free_desc_list(chan, &chan->chain_pending);
919 mmp_pdma_free_desc_list(chan, &chan->chain_running);
920 spin_unlock_irqrestore(&chan->desc_lock, flags);
921 chan->idle = true;
922
923 return 0;
924 }
925
926 static unsigned int mmp_pdma_residue(struct mmp_pdma_chan *chan,
927 dma_cookie_t cookie)
928 {
929 struct mmp_pdma_desc_sw *sw;
930 struct mmp_pdma_device *pdev = to_mmp_pdma_dev(chan->chan.device);
931 unsigned long flags;
932 u64 curr;
933 u32 residue = 0;
934 bool passed = false;
935 bool cyclic = chan->cyclic_first != NULL;
936
937 /*
938 * If the channel does not have a phy pointer anymore, it has already
939 * been completed. Therefore, its residue is 0.
940 */
941 if (!chan->phy)
942 return 0;
943
944 if (chan->dir == DMA_DEV_TO_MEM)
945 curr = pdev->ops->read_dst_addr(chan->phy);
946 else
947 curr = pdev->ops->read_src_addr(chan->phy);
948
949 spin_lock_irqsave(&chan->desc_lock, flags);
950
951 list_for_each_entry(sw, &chan->chain_running, node) {
952 u64 start, end;
953 u32 len;
954
955 if (chan->dir == DMA_DEV_TO_MEM)
956 start = pdev->ops->get_desc_dst_addr(&sw->desc);
957 else
958 start = pdev->ops->get_desc_src_addr(&sw->desc);
959
960 len = sw->desc.dcmd & DCMD_LENGTH;
961 end = start + len;
962
963 /*
964 * 'passed' will be latched once we found the descriptor which
965 * lies inside the boundaries of the curr pointer. All
966 * descriptors that occur in the list _after_ we found that
967 * partially handled descriptor are still to be processed and
968 * are hence added to the residual bytes counter.
969 */
970
971 if (passed) {
972 residue += len;
973 } else if (curr >= start && curr <= end) {
974 residue += (u32)(end - curr);
975 passed = true;
976 }
977
978 /*
979 * Descriptors that have the ENDIRQEN bit set mark the end of a
980 * transaction chain, and the cookie assigned with it has been
981 * returned previously from mmp_pdma_tx_submit().
982 *
983 * In case we have multiple transactions in the running chain,
984 * and the cookie does not match the one the user asked us
985 * about, reset the state variables and start over.
986 *
987 * This logic does not apply to cyclic transactions, where all
988 * descriptors have the ENDIRQEN bit set, and for which we
989 * can't have multiple transactions on one channel anyway.
990 */
991 if (cyclic || !(sw->desc.dcmd & DCMD_ENDIRQEN))
992 continue;
993
994 if (sw->async_tx.cookie == cookie) {
995 spin_unlock_irqrestore(&chan->desc_lock, flags);
996 return residue;
997 } else {
998 residue = 0;
999 passed = false;
1000 }
1001 }
1002
1003 spin_unlock_irqrestore(&chan->desc_lock, flags);
1004
1005 /* We should only get here in case of cyclic transactions */
1006 return residue;
1007 }
1008
1009 static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan,
1010 dma_cookie_t cookie,
1011 struct dma_tx_state *txstate)
1012 {
1013 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
1014 enum dma_status ret;
1015
1016 ret = dma_cookie_status(dchan, cookie, txstate);
1017 if (likely(ret != DMA_ERROR))
1018 dma_set_residue(txstate, mmp_pdma_residue(chan, cookie));
1019
1020 return ret;
1021 }
1022
1023 /*
1024 * mmp_pdma_issue_pending - Issue the DMA start command
1025 * pending list ==> running list
1026 */
1027 static void mmp_pdma_issue_pending(struct dma_chan *dchan)
1028 {
1029 struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan);
1030 unsigned long flags;
1031
1032 spin_lock_irqsave(&chan->desc_lock, flags);
1033 start_pending_queue(chan);
1034 spin_unlock_irqrestore(&chan->desc_lock, flags);
1035 }
1036
1037 /*
1038 * dma_do_tasklet
1039 * Run the completion callbacks
1040 * Start the pending list
1041 */
1042 static void dma_do_tasklet(struct tasklet_struct *t)
1043 {
1044 struct mmp_pdma_chan *chan = from_tasklet(chan, t, tasklet);
1045 struct mmp_pdma_desc_sw *desc, *_desc;
1046 LIST_HEAD(chain_cleanup);
1047 unsigned long flags;
1048 struct dmaengine_desc_callback cb;
1049
1050 if (chan->cyclic_first) {
1051 spin_lock_irqsave(&chan->desc_lock, flags);
1052 desc = chan->cyclic_first;
1053 dmaengine_desc_get_callback(&desc->async_tx, &cb);
1054 spin_unlock_irqrestore(&chan->desc_lock, flags);
1055
1056 dmaengine_desc_callback_invoke(&cb, NULL);
1057
1058 return;
1059 }
1060
1061 /* submit pending list; callback for each desc; free desc */
1062 spin_lock_irqsave(&chan->desc_lock, flags);
1063
1064 list_for_each_entry_safe(desc, _desc, &chan->chain_running, node) {
1065 /*
1066 * move the descriptors to a temporary list so we can drop
1067 * the lock during the entire cleanup operation
1068 */
1069 list_move(&desc->node, &chain_cleanup);
1070
1071 /*
1072 * Look for the first list entry which has the ENDIRQEN flag
1073 * set. That is the descriptor we got an interrupt for, so
1074 * complete that transaction and its cookie.
1075 */
1076 if (desc->desc.dcmd & DCMD_ENDIRQEN) {
1077 dma_cookie_t cookie = desc->async_tx.cookie;
1078 dma_cookie_complete(&desc->async_tx);
1079 dev_dbg(chan->dev, "completed_cookie=%d\n", cookie);
1080 break;
1081 }
1082 }
1083
1084 /*
1085 * The hardware is idle and ready for more when the
1086 * chain_running list is empty.
1087 */
1088 chan->idle = list_empty(&chan->chain_running);
1089
1090 /* Start any pending transactions automatically */
1091 start_pending_queue(chan);
1092 spin_unlock_irqrestore(&chan->desc_lock, flags);
1093
1094 /* Run the callback for each descriptor, in order */
1095 list_for_each_entry_safe(desc, _desc, &chain_cleanup, node) {
1096 struct dma_async_tx_descriptor *txd = &desc->async_tx;
1097
1098 /* Remove from the list of transactions */
1099 list_del(&desc->node);
1100 /* Run the link descriptor callback function */
1101 dmaengine_desc_get_callback(txd, &cb);
1102 dmaengine_desc_callback_invoke(&cb, NULL);
1103
1104 dma_pool_free(chan->desc_pool, desc, txd->phys);
1105 }
1106 }
1107
1108 static void mmp_pdma_remove(struct platform_device *op)
1109 {
1110 struct mmp_pdma_device *pdev = platform_get_drvdata(op);
1111 struct mmp_pdma_phy *phy;
1112 int i, irq = 0, irq_num = 0;
1113
1114 if (op->dev.of_node)
1115 of_dma_controller_free(op->dev.of_node);
1116
1117 for (i = 0; i < pdev->dma_channels; i++) {
1118 if (platform_get_irq(op, i) > 0)
1119 irq_num++;
1120 }
1121
1122 if (irq_num != pdev->dma_channels) {
1123 irq = platform_get_irq(op, 0);
1124 devm_free_irq(&op->dev, irq, pdev);
1125 } else {
1126 for (i = 0; i < pdev->dma_channels; i++) {
1127 phy = &pdev->phy[i];
1128 irq = platform_get_irq(op, i);
1129 devm_free_irq(&op->dev, irq, phy);
1130 }
1131 }
1132
1133 dma_async_device_unregister(&pdev->device);
1134 }
1135
1136 static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq)
1137 {
1138 struct mmp_pdma_phy *phy = &pdev->phy[idx];
1139 struct mmp_pdma_chan *chan;
1140 int ret;
1141
1142 chan = devm_kzalloc(pdev->dev, sizeof(*chan), GFP_KERNEL);
1143 if (chan == NULL)
1144 return -ENOMEM;
1145
1146 phy->idx = idx;
1147 phy->base = pdev->base;
1148
1149 if (irq) {
1150 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler,
1151 IRQF_SHARED, "pdma", phy);
1152 if (ret) {
1153 dev_err(pdev->dev, "failed to request channel irq\n");
1154 return ret;
1155 }
1156 }
1157
1158 spin_lock_init(&chan->desc_lock);
1159 chan->dev = pdev->dev;
1160 chan->chan.device = &pdev->device;
1161 tasklet_setup(&chan->tasklet, dma_do_tasklet);
1162 INIT_LIST_HEAD(&chan->chain_pending);
1163 INIT_LIST_HEAD(&chan->chain_running);
1164
1165 /* register virt channel to dma engine */
1166 list_add_tail(&chan->chan.device_node, &pdev->device.channels);
1167
1168 return 0;
1169 }
1170
1171 static const struct mmp_pdma_ops marvell_pdma_v1_ops = {
1172 .write_next_addr = write_next_addr_32,
1173 .read_src_addr = read_src_addr_32,
1174 .read_dst_addr = read_dst_addr_32,
1175 .set_desc_next_addr = set_desc_next_addr_32,
1176 .set_desc_src_addr = set_desc_src_addr_32,
1177 .set_desc_dst_addr = set_desc_dst_addr_32,
1178 .get_desc_src_addr = get_desc_src_addr_32,
1179 .get_desc_dst_addr = get_desc_dst_addr_32,
1180 .run_bits = (DCSR_RUN),
1181 .dma_width = 32,
1182 };
1183
1184 static const struct mmp_pdma_ops spacemit_k1_pdma_ops = {
1185 .write_next_addr = write_next_addr_64,
1186 .read_src_addr = read_src_addr_64,
1187 .read_dst_addr = read_dst_addr_64,
1188 .set_desc_next_addr = set_desc_next_addr_64,
1189 .set_desc_src_addr = set_desc_src_addr_64,
1190 .set_desc_dst_addr = set_desc_dst_addr_64,
1191 .get_desc_src_addr = get_desc_src_addr_64,
1192 .get_desc_dst_addr = get_desc_dst_addr_64,
1193 .run_bits = (DCSR_RUN | DCSR_LPAEEN),
1194 .dma_width = 64,
1195 };
1196
1197 static const struct of_device_id mmp_pdma_dt_ids[] = {
1198 {
1199 .compatible = "marvell,pdma-1.0",
1200 .data = &marvell_pdma_v1_ops
1201 }, {
1202 .compatible = "spacemit,k1-pdma",
1203 .data = &spacemit_k1_pdma_ops
1204 }, {
1205 /* sentinel */
1206 }
1207 };
1208 MODULE_DEVICE_TABLE(of, mmp_pdma_dt_ids);
1209
1210 static struct dma_chan *mmp_pdma_dma_xlate(struct of_phandle_args *dma_spec,
1211 struct of_dma *ofdma)
1212 {
1213 struct mmp_pdma_device *d = ofdma->of_dma_data;
1214 struct dma_chan *chan;
1215
1216 chan = dma_get_any_slave_channel(&d->device);
1217 if (!chan)
1218 return NULL;
1219
1220 to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0];
1221
1222 return chan;
1223 }
1224
1225 static int mmp_pdma_probe(struct platform_device *op)
1226 {
1227 struct mmp_pdma_device *pdev;
1228 struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
1229 struct clk *clk;
1230 struct reset_control *rst;
1231 int i, ret, irq = 0;
1232 int dma_channels = 0, irq_num = 0;
1233 const enum dma_slave_buswidth widths =
1234 DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
1235 DMA_SLAVE_BUSWIDTH_4_BYTES;
1236
1237 pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
1238 if (!pdev)
1239 return -ENOMEM;
1240
1241 pdev->dev = &op->dev;
1242
1243 spin_lock_init(&pdev->phy_lock);
1244
1245 pdev->base = devm_platform_ioremap_resource(op, 0);
1246 if (IS_ERR(pdev->base))
1247 return PTR_ERR(pdev->base);
1248
1249 clk = devm_clk_get_optional_enabled(pdev->dev, NULL);
1250 if (IS_ERR(clk))
1251 return PTR_ERR(clk);
1252
1253 rst = devm_reset_control_get_optional_exclusive_deasserted(pdev->dev,
1254 NULL);
1255 if (IS_ERR(rst))
1256 return PTR_ERR(rst);
1257
1258 pdev->ops = of_device_get_match_data(&op->dev);
1259 if (!pdev->ops)
1260 return -ENODEV;
1261
1262 if (pdev->dev->of_node) {
1263 /* Parse new and deprecated dma-channels properties */
1264 if (of_property_read_u32(pdev->dev->of_node, "dma-channels",
1265 &dma_channels))
1266 of_property_read_u32(pdev->dev->of_node, "#dma-channels",
1267 &dma_channels);
1268 } else if (pdata && pdata->dma_channels) {
1269 dma_channels = pdata->dma_channels;
1270 } else {
1271 dma_channels = 32; /* default to 32 channels */
1272 }
1273 pdev->dma_channels = dma_channels;
1274
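/* count per-channel interrupts; if not every channel has its own, fall back to one shared irq */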
1275 for (i = 0; i < dma_channels; i++) {
1276 if (platform_get_irq_optional(op, i) > 0)
1277 irq_num++;
1278 }
1279
1280 pdev->phy = devm_kcalloc(pdev->dev, dma_channels, sizeof(*pdev->phy),
1281 GFP_KERNEL);
1282 if (pdev->phy == NULL)
1283 return -ENOMEM;
1284
1285 INIT_LIST_HEAD(&pdev->device.channels);
1286
1287 if (irq_num != dma_channels) {
1288 /* all channels share one irq; demux in the handler */
1289 irq = platform_get_irq(op, 0);
1290 ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler,
1291 IRQF_SHARED, "pdma", pdev);
1292 if (ret)
1293 return ret;
1294 }
1295
1296 for (i = 0; i < dma_channels; i++) {
1297 irq = (irq_num != dma_channels) ? 0 : platform_get_irq(op, i);
1298 ret = mmp_pdma_chan_init(pdev, i, irq);
1299 if (ret)
1300 return ret;
1301 }
1302
1303 dma_cap_set(DMA_SLAVE, pdev->device.cap_mask);
1304 dma_cap_set(DMA_MEMCPY, pdev->device.cap_mask);
1305 dma_cap_set(DMA_CYCLIC, pdev->device.cap_mask);
1306 dma_cap_set(DMA_PRIVATE, pdev->device.cap_mask);
1307 pdev->device.dev = &op->dev;
1308 pdev->device.device_alloc_chan_resources = mmp_pdma_alloc_chan_resources;
1309 pdev->device.device_free_chan_resources = mmp_pdma_free_chan_resources;
1310 pdev->device.device_tx_status = mmp_pdma_tx_status;
1311 pdev->device.device_prep_dma_memcpy = mmp_pdma_prep_memcpy;
1312 pdev->device.device_prep_slave_sg = mmp_pdma_prep_slave_sg;
1313 pdev->device.device_prep_dma_cyclic = mmp_pdma_prep_dma_cyclic;
1314 pdev->device.device_issue_pending = mmp_pdma_issue_pending;
1315 pdev->device.device_config = mmp_pdma_config;
1316 pdev->device.device_terminate_all = mmp_pdma_terminate_all;
1317 pdev->device.copy_align = DMAENGINE_ALIGN_8_BYTES;
1318 pdev->device.src_addr_widths = widths;
1319 pdev->device.dst_addr_widths = widths;
1320 pdev->device.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
1321 pdev->device.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
1322
1323 /* Set DMA mask based on controller hardware capabilities */
1324 dma_set_mask_and_coherent(pdev->dev,
1325 DMA_BIT_MASK(pdev->ops->dma_width));
1326
1327 ret = dma_async_device_register(&pdev->device);
1328 if (ret) {
1329 dev_err(pdev->device.dev, "unable to register\n");
1330 return ret;
1331 }
1332
1333 if (op->dev.of_node) {
1334 /* Device-tree DMA controller registration */
1335 ret = of_dma_controller_register(op->dev.of_node,
1336 mmp_pdma_dma_xlate, pdev);
1337 if (ret < 0) {
1338 dev_err(&op->dev, "of_dma_controller_register failed\n");
1339 dma_async_device_unregister(&pdev->device);
1340 return ret;
1341 }
1342 }
1343
1344 platform_set_drvdata(op, pdev);
1345 dev_info(pdev->device.dev, "initialized %d channels\n", dma_channels);
1346 return 0;
1347 }
1348
1349 static const struct platform_device_id mmp_pdma_id_table[] = {
1350 { "mmp-pdma", },
1351 { },
1352 };
1353
1354 static struct platform_driver mmp_pdma_driver = {
1355 .driver = {
1356 .name = "mmp-pdma",
1357 .of_match_table = mmp_pdma_dt_ids,
1358 },
1359 .id_table = mmp_pdma_id_table,
1360 .probe = mmp_pdma_probe,
1361 .remove = mmp_pdma_remove,
1362 };
1363
1364 module_platform_driver(mmp_pdma_driver);
1365
1366 MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver");
1367 MODULE_AUTHOR("Marvell International Ltd.");
1368 MODULE_LICENSE("GPL v2");
1369