/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#define TSI721_DMA_TX_QUEUE_SZ	16	/* number of transaction descriptors */

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

static inline
struct tsi721_tx_desc *tsi721_dma_first_active(
				struct tsi721_bdma_chan *bdma_chan)
{
	return list_first_entry(&bdma_chan->active_list,
				struct tsi721_tx_desc, desc_node);
}

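/*
 * Initialize a BDMA channel: allocate the hardware buffer descriptor ring
 * (plus one link descriptor that wraps the ring) and the descriptor status
 * FIFO, program their addresses into the channel registers, request the
 * per-channel MSI-X vectors when MSI-X is in use, and toggle channel init.
 */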
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64		*sts_ptr;
	dma_addr_t	bd_phys;
	dma_addr_t	sts_phys;
	int		sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_zalloc_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				&bd_phys, GFP_KERNEL);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
		bd_ptr, (unsigned long long)bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
					(bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_zalloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_KERNEL);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
				  bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	dev_dbg(dev,
		"desc status FIFO @ %p (phys = %llx) size=0x%x\n",
		sts_ptr, (unsigned long long)sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
						 TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-DONE\n",
				bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			dev_dbg(dev, "Unable to get MSI-X for BDMA%d-INT\n",
				bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					    bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
				(bd_num + 1) * sizeof(struct tsi721_dma_desc),
				bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
				sts_size * sizeof(struct tsi721_dma_sts),
				sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

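/*
 * Release a BDMA channel: refuse if the hardware is still running, put the
 * channel back into init state, free its MSI-X vectors (if used) and the
 * coherent memory holding the descriptor ring and status FIFO.
 */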
static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (bdma_chan->bd_base == NULL)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT,	bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		(bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
		bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
		bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
		bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

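/*
 * Enable or disable all interrupt sources of a BDMA channel; pending
 * interrupt status bits are cleared in both cases.
 */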
static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

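/*
 * Top-half BDMA interrupt handling: mask further channel interrupts and
 * defer the actual processing to the channel tasklet. Called by the
 * device-level interrupt dispatcher and by the MSI-X handler below.
 */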
void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	tsi721_bdma_handler(bdma_chan);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start non-idle channel\n");
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to start DMA with no BDs ready\n");
		return;
	}

	dev_dbg(bdma_chan->dchan.device->dev,
		"%s: chan_%d (wrc=%d)\n", __func__, bdma_chan->id,
		bdma_chan->wr_count_next);

	iowrite32(bdma_chan->wr_count_next,
		bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

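/*
 * Fill in the fixed part of a type 1 hardware descriptor: transaction type,
 * destination ID, RapidIO target address and the local (DMA-mapped) buffer
 * address taken from the scatterlist entry. The byte count is completed
 * later by tsi721_desc_fill_end().
 */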
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (bd_ptr == NULL)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
				((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
					(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

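/*
 * Finalize a hardware descriptor: store the total byte count and, if
 * requested, set the interrupt-on-completion flag.
 */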
static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (bd_ptr == NULL)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

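/*
 * Move a failed transaction descriptor back to the free list and invoke its
 * completion callback, if one was set.
 */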
static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

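/*
 * Scan the descriptor status FIFO starting at the software read pointer,
 * zero out each non-empty block of eight 64-bit entries, and write the new
 * read pointer back to the hardware.
 */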
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

/*
 * Translate a transaction's scatterlist into hardware buffer descriptors,
 * merging physically contiguous entries where possible. If the hardware
 * descriptor ring fills up, the remaining scatterlist entries are left in
 * the transaction descriptor for a later call.
 * Must be called with the channel spinlock held.
 */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		dev_err(bdma_chan->dchan.device->dev,
			"BUG: Attempt to use non-idle channel\n");
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(bdma_chan->dchan.device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	dev_dbg(dchan->device->dev, "%s: BD ring status: rdi=%d wri=%d\n",
		__func__, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		dev_dbg(dchan->device->dev, "sg%d/%d addr: 0x%llx len: %d\n",
			i, desc->sg_len,
			(unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			dev_err(dchan->device->dev,
				"%s: SG entry %d is too large\n", __func__, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev,
				"%s: prev desc final len: %d\n",
				__func__, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			dev_dbg(dchan->device->dev,
				"%s: HW descriptor ring is full @ %d\n",
				__func__, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			dev_err(dchan->device->dev,
				"Failed to build desc: err=%d\n", err);
			break;
		}

		dev_dbg(dchan->device->dev, "bd_ptr = %p did=%d raddr=0x%llx\n",
			bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			dev_dbg(dchan->device->dev, "%s: last desc final len: %d\n",
				__func__, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

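/*
 * Move any queued transactions onto the active list and (re)start the
 * hardware on the first active descriptor. Must be called with the channel
 * spinlock held.
 */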
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_tx_desc *desc;
	int err;

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Enter\n", __func__);

	/*
	 * If there are any new transactions in the queue add them
	 * into the processing list
	 */
	if (!list_empty(&bdma_chan->queue))
		list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);

	/* Start new transaction (if available) */
	if (!list_empty(&bdma_chan->active_list)) {
		desc = tsi721_dma_first_active(bdma_chan);
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			dev_dbg(bdma_chan->dchan.device->dev,
				"ERR: tsi721_submit_sg failed with err=%d\n",
				err);
		}
	}

	dev_dbg(bdma_chan->dchan.device->dev, "%s: Exit\n", __func__);
}

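/*
 * Channel tasklet: bottom half of BDMA interrupt handling. Reports error
 * and status-FIFO-full conditions, completes the current transaction when
 * all of its scatterlist entries have been transferred, advances to the
 * next queued transaction and re-enables channel interrupts.
 */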
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
		__func__, bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
			__func__, bdma_chan->id, dmac_sts);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: DMAC%d descriptor status FIFO is full\n",
			__func__, bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = tsi721_dma_first_active(bdma_chan);

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_move(&desc->desc_node, &bdma_chan->free_list);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
			spin_lock(&bdma_chan->lock);
		}

		tsi721_advance_work(bdma_chan);
		spin_unlock(&bdma_chan->lock);
	}

	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

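/*
 * dmaengine ->tx_submit() callback: assign a cookie to the prepared
 * transaction and place it on the channel's pending queue. The transfer
 * does not start until tsi721_issue_pending() is called.
 */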
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		dev_err(bdma_chan->dchan.device->dev,
			"%s: wrong state of descriptor %p\n", __func__, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

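/*
 * dmaengine ->device_alloc_chan_resources() callback: bring up the BDMA
 * channel hardware and allocate the pool of software transaction
 * descriptors. Returns the number of descriptors available to clients.
 */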
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc = NULL;
	int i;

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base)
		return TSI721_DMA_TX_QUEUE_SZ;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		dev_err(dchan->device->dev,
			"Unable to initialize data DMA channel %d, aborting\n",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(TSI721_DMA_TX_QUEUE_SZ, sizeof(struct tsi721_tx_desc),
			GFP_KERNEL);
	if (!desc) {
		dev_err(dchan->device->dev,
			"Failed to allocate logical descriptors\n");
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < TSI721_DMA_TX_QUEUE_SZ; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return TSI721_DMA_TX_QUEUE_SZ;
}

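/*
 * Wait for any in-flight interrupt handlers of this channel to finish,
 * covering both the MSI-X vectors and the shared legacy/MSI interrupt.
 */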
static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

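/*
 * dmaengine ->device_free_chan_resources() callback: quiesce the channel
 * (interrupts, IRQ handlers and tasklet), then release the software
 * descriptor pool and the hardware descriptor ring.
 */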
static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: for channel %d\n",
		__func__, bdma_chan->id);

	if (bdma_chan->bd_base == NULL)
		return;

	BUG_ON(!list_empty(&bdma_chan->active_list));
	BUG_ON(!list_empty(&bdma_chan->queue));

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	return dma_cookie_status(dchan, cookie, txstate);
}

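/*
 * dmaengine ->device_issue_pending() callback: kick the channel so that
 * transactions queued by tsi721_tx_submit() are pushed to the hardware,
 * but only if the channel is currently idle and active.
 */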
static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	dev_dbg(dchan->device->dev, "%s: Enter\n", __func__);

	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active) {
		spin_lock_bh(&bdma_chan->lock);
		tsi721_advance_work(bdma_chan);
		spin_unlock_bh(&bdma_chan->lock);
	}
}

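/*
 * dmaengine ->device_prep_slave_sg() callback. RapidIO clients reach it
 * through the mport DMA interface (e.g. rio_dma_prep_xfer()), which passes
 * a struct rio_dma_ext describing the target device ID, RapidIO address and
 * write type as the driver-private @tinfo argument. Picks an ACKed software
 * descriptor from the free list and records the transfer parameters in it;
 * hardware descriptors are built later, at submit time.
 */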
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
		return NULL;
	}

	dev_dbg(dchan->device->dev, "%s: %s\n", __func__,
		(dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		dev_err(dchan->device->dev,
			"%s: Unsupported DMA direction option\n", __func__);
		return NULL;
	}

	spin_lock_bh(&bdma_chan->lock);

	list_for_each_entry_safe(desc, _d, &bdma_chan->free_list, desc_node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->desc_node);
			desc->destid = rext->destid;
			desc->rio_addr = rext->rio_addr;
			desc->rio_addr_u = 0;
			desc->rtype = rtype;
			desc->sg_len	= sg_len;
			desc->sg	= sgl;
			txd		= &desc->txd;
			txd->flags	= flags;
			break;
		}
	}

	spin_unlock_bh(&bdma_chan->lock);

	return txd;
}

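/*
 * dmaengine ->device_terminate_all() callback: suspend the channel if it is
 * still running, wait for the suspend to take effect, then fail every
 * queued and active transaction.
 */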
static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	u32 dmac_int;
	LIST_HEAD(list);

	dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
	}

	list_splice_init(&bdma_chan->active_list, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

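/*
 * Register the Tsi721 BDMA channels with the dmaengine core. All channels
 * except the one reserved for maintenance transactions are exposed as
 * DMA_SLAVE/DMA_PRIVATE channels of the RapidIO mport DMA device.
 */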
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if (i == TSI721_DMACH_MAINT)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		INIT_LIST_HEAD(&bdma_chan->active_list);
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		dev_err(&priv->pdev->dev, "Failed to register DMA device\n");

	return err;
}