// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Copyright (C) 2017 Stefan Roese <sr@denx.de>
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
 */
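
/*
 * The core is accessed through up to three register regions: the dispatcher
 * CSR, the descriptor slave port and the optional response FIFO (see the
 * request_and_map() calls in msgdma_probe() below). Purely as an
 * illustration, with placeholder addresses, sizes and interrupt specifier,
 * a matching device tree node might look like:
 *
 *	msgdma0: dma-controller@ff200000 {
 *		compatible = "altr,socfpga-msgdma";
 *		reg = <0xff200000 0x20>,
 *		      <0xff200020 0x20>,
 *		      <0xff200040 0x8>;
 *		reg-names = "csr", "desc", "resp";
 *		interrupts = <0 104 4>;
 *		#dma-cells = <1>;
 *	};
 *
 * Since the driver registers of_dma_xlate_by_chan_id(), the single
 * dma-cell in a client's "dmas" property selects the channel id.
 */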

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/of_dma.h>

#include "dmaengine.h"

#define MSGDMA_MAX_TRANS_LEN		U32_MAX
#define MSGDMA_DESC_NUM			1024

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @read_addr_lo: data buffer source address low bits
 * @write_addr_lo: data buffer destination address low bits
 * @len: the number of bytes to transfer per descriptor
 * @burst_seq_num: bit 31:24 write burst
 *		   bit 23:16 read burst
 *		   bit 15:00 sequence number
 * @stride: bit 31:16 write stride
 *	    bit 15:00 read stride
 * @read_addr_hi: data buffer source address high bits
 * @write_addr_hi: data buffer destination address high bits
 * @control: characteristics of the transfer
 */
struct msgdma_extended_desc {
	u32 read_addr_lo;
	u32 write_addr_lo;
	u32 len;
	u32 burst_seq_num;
	u32 stride;
	u32 read_addr_hi;
	u32 write_addr_hi;
	u32 control;
};
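
/*
 * The field order above mirrors the layout of the hardware descriptor
 * slave port: msgdma_copy_one() below relies on it when it memcpy()s
 * everything up to, but not including, the final control word and then
 * writes that word separately to commit the descriptor.
 */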

/* mSGDMA descriptor control field bit definitions */
#define MSGDMA_DESC_CTL_SET_CH(x)	((x) & 0xff)
#define MSGDMA_DESC_CTL_GEN_SOP		BIT(8)
#define MSGDMA_DESC_CTL_GEN_EOP		BIT(9)
#define MSGDMA_DESC_CTL_PARK_READS	BIT(10)
#define MSGDMA_DESC_CTL_PARK_WRITES	BIT(11)
#define MSGDMA_DESC_CTL_END_ON_EOP	BIT(12)
#define MSGDMA_DESC_CTL_END_ON_LEN	BIT(13)
#define MSGDMA_DESC_CTL_TR_COMP_IRQ	BIT(14)
#define MSGDMA_DESC_CTL_EARLY_IRQ	BIT(15)
#define MSGDMA_DESC_CTL_TR_ERR_IRQ	GENMASK(23, 16)
#define MSGDMA_DESC_CTL_EARLY_DONE	BIT(24)

/*
 * Writing "1" to the "go" bit commits the entire descriptor into the
 * descriptor FIFO(s)
 */
#define MSGDMA_DESC_CTL_GO		BIT(31)

/* Tx buffer control flags */
#define MSGDMA_DESC_CTL_TX_FIRST	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_MIDDLE	(MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_LAST		(MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_TX_SINGLE	(MSGDMA_DESC_CTL_GEN_SOP |	\
					 MSGDMA_DESC_CTL_GEN_EOP |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

#define MSGDMA_DESC_CTL_RX_SINGLE	(MSGDMA_DESC_CTL_END_ON_EOP |	\
					 MSGDMA_DESC_CTL_END_ON_LEN |	\
					 MSGDMA_DESC_CTL_TR_COMP_IRQ |	\
					 MSGDMA_DESC_CTL_EARLY_IRQ |	\
					 MSGDMA_DESC_CTL_TR_ERR_IRQ |	\
					 MSGDMA_DESC_CTL_GO)

/* mSGDMA extended descriptor stride definitions */
#define MSGDMA_DESC_STRIDE_RD		0x00000001
#define MSGDMA_DESC_STRIDE_WR		0x00010000
#define MSGDMA_DESC_STRIDE_RW		0x00010001
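
/*
 * A stride half-word of 1 advances the address by one word per word
 * transferred, while 0 keeps the address fixed (suitable for a device
 * FIFO). Hence MEM_TO_DEV transfers use MSGDMA_DESC_STRIDE_RD (memory
 * read side increments, device write side stays put), DEV_TO_MEM uses
 * MSGDMA_DESC_STRIDE_WR, and memcpy uses MSGDMA_DESC_STRIDE_RW.
 */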

/* mSGDMA dispatcher control and status register map */
#define MSGDMA_CSR_STATUS		0x00	/* Read / Clear */
#define MSGDMA_CSR_CONTROL		0x04	/* Read / Write */
#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RESP_FILL_LEVEL	0x0c	/* response FIFO fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write seq number */
						/* 15:00 - read seq number */

/* mSGDMA CSR status register bit definitions */
#define MSGDMA_CSR_STAT_BUSY			BIT(0)
#define MSGDMA_CSR_STAT_DESC_BUF_EMPTY		BIT(1)
#define MSGDMA_CSR_STAT_DESC_BUF_FULL		BIT(2)
#define MSGDMA_CSR_STAT_RESP_BUF_EMPTY		BIT(3)
#define MSGDMA_CSR_STAT_RESP_BUF_FULL		BIT(4)
#define MSGDMA_CSR_STAT_STOPPED			BIT(5)
#define MSGDMA_CSR_STAT_RESETTING		BIT(6)
#define MSGDMA_CSR_STAT_STOPPED_ON_ERR		BIT(7)
#define MSGDMA_CSR_STAT_STOPPED_ON_EARLY	BIT(8)
#define MSGDMA_CSR_STAT_IRQ			BIT(9)
#define MSGDMA_CSR_STAT_MASK			GENMASK(9, 0)
#define MSGDMA_CSR_STAT_MASK_WITHOUT_IRQ	GENMASK(8, 0)

#define DESC_EMPTY	(MSGDMA_CSR_STAT_DESC_BUF_EMPTY | \
			 MSGDMA_CSR_STAT_RESP_BUF_EMPTY)

/* mSGDMA CSR control register bit definitions */
#define MSGDMA_CSR_CTL_STOP			BIT(0)
#define MSGDMA_CSR_CTL_RESET			BIT(1)
#define MSGDMA_CSR_CTL_STOP_ON_ERR		BIT(2)
#define MSGDMA_CSR_CTL_STOP_ON_EARLY		BIT(3)
#define MSGDMA_CSR_CTL_GLOBAL_INTR		BIT(4)
#define MSGDMA_CSR_CTL_STOP_DESCS		BIT(5)

/* mSGDMA CSR fill level bits */
#define MSGDMA_CSR_WR_FILL_LEVEL_GET(v)		(((v) & 0xffff0000) >> 16)
#define MSGDMA_CSR_RD_FILL_LEVEL_GET(v)		((v) & 0x0000ffff)
#define MSGDMA_CSR_RESP_FILL_LEVEL_GET(v)	((v) & 0x0000ffff)

#define MSGDMA_CSR_SEQ_NUM_GET(v)		(((v) & 0xffff0000) >> 16)

/* mSGDMA response register map */
#define MSGDMA_RESP_BYTES_TRANSFERRED	0x00
#define MSGDMA_RESP_STATUS		0x04

/* mSGDMA response register bit definitions */
#define MSGDMA_RESP_EARLY_TERM	BIT(8)
#define MSGDMA_RESP_ERR_MASK	0xff

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @async_tx: support for the async_tx api
 * @hw_desc: associated HW descriptor
 * @node: node on one of the channel's descriptor lists
 * @tx_list: list of child descriptors chained to this transaction
 */
struct msgdma_sw_desc {
	struct dma_async_tx_descriptor async_tx;
	struct msgdma_extended_desc hw_desc;
	struct list_head node;
	struct list_head tx_list;
};

/*
 * struct msgdma_device - DMA device structure
 */
struct msgdma_device {
	spinlock_t lock;
	struct device *dev;
	struct tasklet_struct irq_tasklet;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct list_head done_list;
	u32 desc_free_cnt;
	bool idle;

	struct dma_device dmadev;
	struct dma_chan	dmachan;
	dma_addr_t hw_desq;
	struct msgdma_sw_desc *sw_desq;
	unsigned int npendings;

	struct dma_slave_config slave_cfg;

	int irq;

	/* mSGDMA controller */
	void __iomem *csr;

	/* mSGDMA descriptors */
	void __iomem *desc;

	/* mSGDMA response */
	void __iomem *resp;
};
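
/*
 * Software descriptors migrate through the four lists above as a
 * transaction progresses: taken from free_list by the prep callbacks,
 * queued on pending_list by msgdma_tx_submit(), moved to active_list
 * when msgdma_start_transfer() pushes them into the hardware FIFO, and
 * finally parked on done_list by msgdma_complete_descriptor() until the
 * completion tasklet returns them to free_list.
 */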

#define to_mdev(chan)	container_of(chan, struct msgdma_device, dmachan)
#define tx_to_desc(tx)	container_of(tx, struct msgdma_sw_desc, async_tx)

/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 *
 * Return: The sw descriptor
 */
static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
	list_del(&desc->node);
	spin_unlock_irqrestore(&mdev->lock, flags);

	INIT_LIST_HEAD(&desc->tx_list);

	return desc;
}

/**
 * msgdma_free_descriptor - Free a descriptor and its children
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_free_descriptor(struct msgdma_device *mdev,
				   struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *child, *next;

	mdev->desc_free_cnt++;
	list_move_tail(&desc->node, &mdev->free_list);
	list_for_each_entry_safe(child, next, &desc->tx_list, node) {
		mdev->desc_free_cnt++;
		list_move_tail(&child->node, &mdev->free_list);
	}
}

/**
 * msgdma_free_desc_list - Free a list of descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List of descriptors to free
 */
static void msgdma_free_desc_list(struct msgdma_device *mdev,
				  struct list_head *list)
{
	struct msgdma_sw_desc *desc, *next;

	list_for_each_entry_safe(desc, next, list, node)
		msgdma_free_descriptor(mdev, desc);
}

/**
 * msgdma_desc_config - Configure the descriptor
 * @desc: Hw descriptor pointer
 * @dst: Destination buffer address
 * @src: Source buffer address
 * @len: Transfer length
 * @stride: Read/write stride value to set
 */
static void msgdma_desc_config(struct msgdma_extended_desc *desc,
			       dma_addr_t dst, dma_addr_t src, size_t len,
			       u32 stride)
{
	/* Set lower 32 bits of src & dst addresses in the descriptor */
	desc->read_addr_lo = lower_32_bits(src);
	desc->write_addr_lo = lower_32_bits(dst);

	/* Set upper 32 bits of src & dst addresses in the descriptor */
	desc->read_addr_hi = upper_32_bits(src);
	desc->write_addr_hi = upper_32_bits(dst);

	desc->len = len;
	desc->stride = stride;
	desc->burst_seq_num = 0;	/* 0 will result in max burst length */

	/*
	 * Don't set interrupt on xfer end yet, this will be done later
	 * for the "last" descriptor
	 */
	desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
		MSGDMA_DESC_CTL_END_ON_LEN;
}

/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 * @desc: Hw descriptor pointer
 */
static void msgdma_desc_config_eod(struct msgdma_extended_desc *desc)
{
	desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
}

/**
 * msgdma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Return: cookie value
 */
static dma_cookie_t msgdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct msgdma_device *mdev = to_mdev(tx->chan);
	struct msgdma_sw_desc *new;
	dma_cookie_t cookie;
	unsigned long flags;

	new = tx_to_desc(tx);
	spin_lock_irqsave(&mdev->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&new->node, &mdev->pending_list);
	spin_unlock_irqrestore(&mdev->lock, flags);

	return cookie;
}

/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 * @dma_dst: Destination buffer address
 * @dma_src: Source buffer address
 * @len: Transfer length
 * @flags: transfer ack flags
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
		   dma_addr_t dma_src, size_t len, ulong flags)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *new, *first = NULL;
	struct msgdma_extended_desc *desc;
	size_t copy;
	u32 desc_cnt;
	unsigned long irqflags;

	desc_cnt = DIV_ROUND_UP(len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	do {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		copy = min_t(size_t, len, MSGDMA_MAX_TRANS_LEN);
		desc = &new->hw_desc;
		msgdma_desc_config(desc, dma_dst, dma_src, copy,
				   MSGDMA_DESC_STRIDE_RW);
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
	} while (len);

	msgdma_desc_config_eod(desc);
	async_tx_ack(&first->async_tx);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
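
/*
 * A minimal sketch of how a dmaengine client would exercise the memcpy
 * path above (error handling trimmed; "chan", "dst", "src" and "len" are
 * assumed to have been set up by the client, e.g. via dma_request_chan()
 * and dma_map_single()):
 *
 *	struct dma_async_tx_descriptor *tx;
 *	dma_cookie_t cookie;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */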

/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 * @dchan: DMA channel
 * @sgl: Memory-side scatterlist (source for MEM_TO_DEV, destination
 *	 for DEV_TO_MEM)
 * @sg_len: Number of entries in the scatterlist
 * @dir: DMA transfer direction
 * @flags: transfer ack flags
 * @context: transfer context (unused)
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *
msgdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		     unsigned int sg_len, enum dma_transfer_direction dir,
		     unsigned long flags, void *context)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct dma_slave_config *cfg = &mdev->slave_cfg;
	struct msgdma_sw_desc *new, *first = NULL;
	void *desc = NULL;
	size_t len, avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt;
	u32 stride;
	unsigned long irqflags;

	desc_cnt = sg_nents_for_dma(sgl, sg_len, MSGDMA_MAX_TRANS_LEN);

	spin_lock_irqsave(&mdev->lock, irqflags);
	if (desc_cnt > mdev->desc_free_cnt) {
		spin_unlock_irqrestore(&mdev->lock, irqflags);
		dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
		return NULL;
	}
	mdev->desc_free_cnt -= desc_cnt;
	spin_unlock_irqrestore(&mdev->lock, irqflags);

	avail = sg_dma_len(sgl);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/* Allocate and populate the descriptor */
		new = msgdma_get_descriptor(mdev);

		desc = &new->hw_desc;
		len = min_t(size_t, avail, MSGDMA_MAX_TRANS_LEN);

		if (dir == DMA_MEM_TO_DEV) {
			dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			dma_dst = cfg->dst_addr;
			stride = MSGDMA_DESC_STRIDE_RD;
		} else {
			dma_src = cfg->src_addr;
			dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
			stride = MSGDMA_DESC_STRIDE_WR;
		}
		msgdma_desc_config(desc, dma_dst, dma_src, len, stride);
		avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);

		/* Fetch the next scatterlist entry */
		if (avail == 0) {
			if (sg_len == 0)
				break;
			sgl = sg_next(sgl);
			if (sgl == NULL)
				break;
			sg_len--;
			avail = sg_dma_len(sgl);
		}
	}

	msgdma_desc_config_eod(desc);
	first->async_tx.flags = flags;

	return &first->async_tx;
}
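
/*
 * The slave path depends on a prior dmaengine_slave_config() call, which
 * lands in msgdma_dma_config() below. An illustrative client sequence for
 * MEM_TO_DEV, where "fifo_addr" stands in for the device-side address and
 * "sgl"/"sg_len" are mapped by the client:
 *
 *	struct dma_slave_config cfg = {
 *		.direction = DMA_MEM_TO_DEV,
 *		.dst_addr = fifo_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *tx;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
 *				     DMA_PREP_INTERRUPT);
 *	if (!tx)
 *		return -EBUSY;
 *	dmaengine_submit(tx);
 *	dma_async_issue_pending(chan);
 */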

static int msgdma_dma_config(struct dma_chan *dchan,
			     struct dma_slave_config *config)
{
	struct msgdma_device *mdev = to_mdev(dchan);

	memcpy(&mdev->slave_cfg, config, sizeof(*config));

	return 0;
}

static void msgdma_reset(struct msgdma_device *mdev)
{
	u32 val;
	int ret;

	/* Reset mSGDMA */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
	iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

	ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
				 (val & MSGDMA_CSR_STAT_RESETTING) == 0,
				 1, 10000);
	if (ret)
		dev_err(mdev->dev, "DMA channel did not reset\n");

	/* Clear all status bits */
	iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

	/* Enable the DMA controller including interrupts */
	iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
		  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

	mdev->idle = true;
}

static void msgdma_copy_one(struct msgdma_device *mdev,
			    struct msgdma_sw_desc *desc)
{
	void __iomem *hw_desc = mdev->desc;

	/*
	 * Check that the DESC FIFO is not full. If it is full, we need to
	 * wait for at least one entry to become free again
	 */
	while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
	       MSGDMA_CSR_STAT_DESC_BUF_FULL)
		mdelay(1);

	/*
	 * The descriptor needs to get copied into the descriptor FIFO
	 * of the DMA controller. The descriptor will get flushed to the
	 * FIFO once the last word (the control word) is written. Since we
	 * are not 100% sure that memcpy() writes all words in the "correct"
	 * order (addresses from low to high) on all architectures, we make
	 * sure this control word is written last by writing it separately
	 * and adding some write-barriers here.
	 */
	memcpy((void __force *)hw_desc, &desc->hw_desc,
	       sizeof(desc->hw_desc) - sizeof(u32));

	/* Write control word last to flush this descriptor into the FIFO */
	mdev->idle = false;
	wmb();
	iowrite32(desc->hw_desc.control, hw_desc +
		  offsetof(struct msgdma_extended_desc, control));
	wmb();
}

/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @desc: Transaction descriptor pointer
 */
static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev,
				     struct msgdma_sw_desc *desc)
{
	struct msgdma_sw_desc *sdesc, *next;

	msgdma_copy_one(mdev, desc);

	list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
		msgdma_copy_one(mdev, sdesc);
}

/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_start_transfer(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	if (!mdev->idle)
		return;

	desc = list_first_entry_or_null(&mdev->pending_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;

	list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
	msgdma_copy_desc_to_fifo(mdev, desc);
}

/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
static void msgdma_issue_pending(struct dma_chan *chan)
{
	struct msgdma_device *mdev = to_mdev(chan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_start_transfer(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
}

/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc, *next;
	unsigned long irqflags;

	spin_lock_irqsave(&mdev->lock, irqflags);

	list_for_each_entry_safe(desc, next, &mdev->done_list, node) {
		struct dmaengine_desc_callback cb;

		dmaengine_desc_get_callback(&desc->async_tx, &cb);
		if (dmaengine_desc_callback_valid(&cb)) {
			spin_unlock_irqrestore(&mdev->lock, irqflags);
			dmaengine_desc_callback_invoke(&cb, NULL);
			spin_lock_irqsave(&mdev->lock, irqflags);
		}

		/* Run any dependencies, then free the descriptor */
		msgdma_free_descriptor(mdev, desc);
	}

	spin_unlock_irqrestore(&mdev->lock, irqflags);
}

/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_complete_descriptor(struct msgdma_device *mdev)
{
	struct msgdma_sw_desc *desc;

	desc = list_first_entry_or_null(&mdev->active_list,
					struct msgdma_sw_desc, node);
	if (!desc)
		return;
	list_del(&desc->node);
	dma_cookie_complete(&desc->async_tx);
	list_add_tail(&desc->node, &mdev->done_list);
}

/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_free_descriptors(struct msgdma_device *mdev)
{
	msgdma_free_desc_list(mdev, &mdev->active_list);
	msgdma_free_desc_list(mdev, &mdev->pending_list);
	msgdma_free_desc_list(mdev, &mdev->done_list);
}

/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
static void msgdma_free_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);
	msgdma_free_descriptors(mdev);
	spin_unlock_irqrestore(&mdev->lock, flags);
	kfree(mdev->sw_desq);
}

/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 *
 * Return: Number of descriptors on success and failure value on error
 */
static int msgdma_alloc_chan_resources(struct dma_chan *dchan)
{
	struct msgdma_device *mdev = to_mdev(dchan);
	struct msgdma_sw_desc *desc;
	int i;

	mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
	if (!mdev->sw_desq)
		return -ENOMEM;

	mdev->idle = true;
	mdev->desc_free_cnt = MSGDMA_DESC_NUM;

	INIT_LIST_HEAD(&mdev->free_list);

	for (i = 0; i < MSGDMA_DESC_NUM; i++) {
		desc = mdev->sw_desq + i;
		dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
		desc->async_tx.tx_submit = msgdma_tx_submit;
		list_add_tail(&desc->node, &mdev->free_list);
	}

	return MSGDMA_DESC_NUM;
}
/**
 * msgdma_tasklet - Handle descriptor completions
 * @t: Pointer to the tasklet in the Altera mSGDMA device structure
 */
static void msgdma_tasklet(struct tasklet_struct *t)
{
	struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet);
	u32 count;
	u32 __maybe_unused size;
	u32 __maybe_unused status;
	unsigned long flags;

	spin_lock_irqsave(&mdev->lock, flags);

	if (mdev->resp) {
		/* Read number of responses that are available */
		count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
		dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
			__func__, __LINE__, count);
	} else {
		count = 1;
	}

	while (count--) {
		/*
		 * Read both longwords to purge this response from the FIFO.
		 * On Avalon-MM implementations, size and status do not
		 * have any real values, like transferred bytes or error
		 * bits. So we need to just drop these values.
		 */
		if (mdev->resp) {
			size = ioread32(mdev->resp +
					MSGDMA_RESP_BYTES_TRANSFERRED);
			status = ioread32(mdev->resp +
					MSGDMA_RESP_STATUS);
		}

		msgdma_complete_descriptor(mdev);
	}

	spin_unlock_irqrestore(&mdev->lock, flags);

	msgdma_chan_desc_cleanup(mdev);
}

/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the Altera mSGDMA device structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t msgdma_irq_handler(int irq, void *data)
{
	struct msgdma_device *mdev = data;
	u32 status;

	status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
	if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
		/* Start next transfer if the DMA controller is idle */
		spin_lock(&mdev->lock);
		mdev->idle = true;
		msgdma_start_transfer(mdev);
		spin_unlock(&mdev->lock);
	}

	tasklet_schedule(&mdev->irq_tasklet);

	/* Clear interrupt in mSGDMA controller */
	iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

	return IRQ_HANDLED;
}

/**
 * msgdma_dev_remove() - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
static void msgdma_dev_remove(struct msgdma_device *mdev)
{
	if (!mdev)
		return;

	devm_free_irq(mdev->dev, mdev->irq, mdev);
	tasklet_kill(&mdev->irq_tasklet);
	list_del(&mdev->dmachan.device_node);
}

static int request_and_map(struct platform_device *pdev, const char *name,
			   struct resource **res, void __iomem **ptr,
			   bool optional)
{
	struct resource *region;
	struct device *device = &pdev->dev;

	*res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	if (*res == NULL) {
		if (optional) {
			*ptr = NULL;
			dev_info(device, "optional resource %s not defined\n",
				 name);
			return 0;
		}
		dev_err(device, "mandatory resource %s not defined\n", name);
		return -ENODEV;
	}

	region = devm_request_mem_region(device, (*res)->start,
					 resource_size(*res), dev_name(device));
	if (region == NULL) {
		dev_err(device, "unable to request %s\n", name);
		return -EBUSY;
	}

	*ptr = devm_ioremap(device, region->start,
				    resource_size(region));
	if (*ptr == NULL) {
		dev_err(device, "ioremap of %s failed!\n", name);
		return -ENOMEM;
	}

	return 0;
}

/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 *
 * Return: '0' on success and failure value on error
 */
static int msgdma_probe(struct platform_device *pdev)
{
	struct msgdma_device *mdev;
	struct dma_device *dma_dev;
	struct resource *dma_res;
	int ret;

	mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
	if (!mdev)
		return -ENOMEM;

	mdev->dev = &pdev->dev;

	/* Map CSR space */
	ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr, false);
	if (ret)
		return ret;

	/* Map (extended) descriptor space */
	ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc, false);
	if (ret)
		return ret;

	/* Map response space */
	ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp, true);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, mdev);

	/* Get interrupt nr from platform data */
	mdev->irq = platform_get_irq(pdev, 0);
	if (mdev->irq < 0)
		return mdev->irq;

	ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
			       0, dev_name(&pdev->dev), mdev);
	if (ret)
		return ret;

	tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);

	dma_cookie_init(&mdev->dmachan);

	spin_lock_init(&mdev->lock);

	INIT_LIST_HEAD(&mdev->active_list);
	INIT_LIST_HEAD(&mdev->pending_list);
	INIT_LIST_HEAD(&mdev->done_list);
	INIT_LIST_HEAD(&mdev->free_list);

	dma_dev = &mdev->dmadev;

	/* Set DMA capabilities */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

	dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
	dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |
		BIT(DMA_MEM_TO_MEM);
	dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	/* Init DMA link list */
	INIT_LIST_HEAD(&dma_dev->channels);

	/* Set base routines */
	dma_dev->device_tx_status = dma_cookie_status;
	dma_dev->device_issue_pending = msgdma_issue_pending;
	dma_dev->dev = &pdev->dev;

	dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
	dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
	dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
	dma_dev->device_config = msgdma_dma_config;

	dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

	mdev->dmachan.device = dma_dev;
	list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

	/* Set DMA mask to 64 bits */
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (ret) {
		dev_warn(&pdev->dev, "unable to set 64-bit coherent DMA mask\n");
		goto fail;
	}

	msgdma_reset(mdev);

	ret = dma_async_device_register(dma_dev);
	if (ret)
		goto fail;

	ret = of_dma_controller_register(pdev->dev.of_node,
					 of_dma_xlate_by_chan_id, dma_dev);
	if (ret == -EINVAL)
		dev_warn(&pdev->dev, "device was not probed from DT\n");
	else if (ret && ret != -ENODEV)
		goto fail;

	dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");

	return 0;

fail:
	msgdma_dev_remove(mdev);

	return ret;
}

/**
 * msgdma_remove() - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
static void msgdma_remove(struct platform_device *pdev)
{
	struct msgdma_device *mdev = platform_get_drvdata(pdev);

	if (pdev->dev.of_node)
		of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&mdev->dmadev);
	msgdma_dev_remove(mdev);

	dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");
}

#ifdef CONFIG_OF
static const struct of_device_id msgdma_match[] = {
	{ .compatible = "altr,socfpga-msgdma", },
	{ }
};

MODULE_DEVICE_TABLE(of, msgdma_match);
#endif

static struct platform_driver msgdma_driver = {
	.driver = {
		.name = "altera-msgdma",
		.of_match_table = of_match_ptr(msgdma_match),
	},
	.probe = msgdma_probe,
	.remove = msgdma_remove,
};

module_platform_driver(msgdma_driver);

MODULE_ALIAS("platform:altera-msgdma");
MODULE_DESCRIPTION("Altera mSGDMA driver");
MODULE_AUTHOR("Stefan Roese <sr@denx.de>");
MODULE_LICENSE("GPL");