// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA Engine support for Tsi721 PCIExpress-to-SRIO bridge
 *
 * Copyright (c) 2011-2014 Integrated Device Technology, Inc.
 * Alexandre Bounine <alexandre.bounine@idt.com>
 */

#include <linux/io.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/rio.h>
#include <linux/rio_drv.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kfifo.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include "../../dma/dmaengine.h"

#include "tsi721.h"

#ifdef CONFIG_PCI_MSI
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr);
#endif
static int tsi721_submit_sg(struct tsi721_tx_desc *desc);

static unsigned int dma_desc_per_channel = 128;
module_param(dma_desc_per_channel, uint, S_IRUGO);
MODULE_PARM_DESC(dma_desc_per_channel,
		 "Number of DMA descriptors per channel (default: 128)");

static unsigned int dma_txqueue_sz = 16;
module_param(dma_txqueue_sz, uint, S_IRUGO);
MODULE_PARM_DESC(dma_txqueue_sz,
		 "DMA Transactions Queue Size (default: 16)");

static u8 dma_sel = 0x7f;
module_param(dma_sel, byte, S_IRUGO);
MODULE_PARM_DESC(dma_sel,
		 "DMA Channel Selection Mask (default: 0x7f = all)");

static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
{
	return container_of(chan, struct tsi721_bdma_chan, dchan);
}

static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
{
	return container_of(ddev, struct rio_mport, dma)->priv;
}

static inline
struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
{
	return container_of(txd, struct tsi721_tx_desc, txd);
}

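/*
 * Set up the hardware resources of one BDMA channel: a ring of bd_num
 * buffer descriptors plus one extra DTYPE3 descriptor that links back
 * to the start of the ring (making it circular), and a descriptor
 * status FIFO sized to the next power of two of at least
 * TSI721_DMA_MINSTSSZ entries (assumption: that constant reflects the
 * hardware minimum).
 */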
static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan, int bd_num)
{
	struct tsi721_dma_desc *bd_ptr;
	struct device *dev = bdma_chan->dchan.device->dev;
	u64 *sts_ptr;
	dma_addr_t bd_phys;
	dma_addr_t sts_phys;
	int sts_size;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	/*
	 * Allocate space for DMA descriptors
	 * (add an extra element for link descriptor)
	 */
	bd_ptr = dma_alloc_coherent(dev,
				    (bd_num + 1) * sizeof(struct tsi721_dma_desc),
				    &bd_phys, GFP_ATOMIC);
	if (!bd_ptr)
		return -ENOMEM;

	bdma_chan->bd_num = bd_num;
	bdma_chan->bd_phys = bd_phys;
	bdma_chan->bd_base = bd_ptr;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d descriptors @ %p (phys = %pad)",
		  bdma_chan->id, bd_ptr, &bd_phys);

	/* Allocate space for descriptor status FIFO */
	sts_size = ((bd_num + 1) >= TSI721_DMA_MINSTSSZ) ?
		   (bd_num + 1) : TSI721_DMA_MINSTSSZ;
	sts_size = roundup_pow_of_two(sts_size);
	sts_ptr = dma_alloc_coherent(dev,
				     sts_size * sizeof(struct tsi721_dma_sts),
				     &sts_phys, GFP_ATOMIC);
	if (!sts_ptr) {
		/* Free space allocated for DMA descriptors */
		dma_free_coherent(dev,
				  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
				  bd_ptr, bd_phys);
		bdma_chan->bd_base = NULL;
		return -ENOMEM;
	}

	bdma_chan->sts_phys = sts_phys;
	bdma_chan->sts_base = sts_ptr;
	bdma_chan->sts_size = sts_size;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device,
		  "DMAC%d desc status FIFO @ %p (phys = %pad) size=0x%x",
		  bdma_chan->id, sts_ptr, &sts_phys, sts_size);

	/* Initialize DMA descriptors ring using added link descriptor */
	bd_ptr[bd_num].type_id = cpu_to_le32(DTYPE3 << 29);
	bd_ptr[bd_num].next_lo = cpu_to_le32((u64)bd_phys &
					     TSI721_DMAC_DPTRL_MASK);
	bd_ptr[bd_num].next_hi = cpu_to_le32((u64)bd_phys >> 32);

	/* Setup DMA descriptor pointers */
	iowrite32(((u64)bd_phys >> 32),
		  bdma_chan->regs + TSI721_DMAC_DPTRH);
	iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
		  bdma_chan->regs + TSI721_DMAC_DPTRL);

	/* Setup descriptor status FIFO */
	iowrite32(((u64)sts_phys >> 32),
		  bdma_chan->regs + TSI721_DMAC_DSBH);
	iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
		  bdma_chan->regs + TSI721_DMAC_DSBL);
	iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
		  bdma_chan->regs + TSI721_DMAC_DSSZ);

	/* Clear interrupt bits */
	iowrite32(TSI721_DMAC_INT_ALL,
		  bdma_chan->regs + TSI721_DMAC_INT);

	ioread32(bdma_chan->regs + TSI721_DMAC_INT);

#ifdef CONFIG_PCI_MSI
	/* Request interrupt service if we are in MSI-X mode */
	if (priv->flags & TSI721_USING_MSIX) {
		int rc, idx;

		idx = TSI721_VECT_DMA0_DONE + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-DONE",
				  bdma_chan->id);
			goto err_out;
		}

		idx = TSI721_VECT_DMA0_INT + bdma_chan->id;

		rc = request_irq(priv->msix[idx].vector, tsi721_bdma_msix, 0,
				 priv->msix[idx].irq_name, (void *)bdma_chan);

		if (rc) {
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "Unable to get MSI-X for DMAC%d-INT",
				  bdma_chan->id);
			free_irq(
				priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector,
				(void *)bdma_chan);
		}

err_out:
		if (rc) {
			/* Free space allocated for DMA descriptors */
			dma_free_coherent(dev,
					  (bd_num + 1) * sizeof(struct tsi721_dma_desc),
					  bd_ptr, bd_phys);
			bdma_chan->bd_base = NULL;

			/* Free space allocated for status descriptors */
			dma_free_coherent(dev,
					  sts_size * sizeof(struct tsi721_dma_sts),
					  sts_ptr, sts_phys);
			bdma_chan->sts_base = NULL;

			return -EIO;
		}
	}
#endif /* CONFIG_PCI_MSI */

	/* Toggle DMA channel initialization */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
	ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
	bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
	bdma_chan->sts_rdptr = 0;
	udelay(10);

	return 0;
}

static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
{
	u32 ch_stat;
#ifdef CONFIG_PCI_MSI
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);
#endif

	if (!bdma_chan->bd_base)
		return 0;

	/* Check if DMA channel still running */
	ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	if (ch_stat & TSI721_DMAC_STS_RUN)
		return -EFAULT;

	/* Put DMA channel into init state */
	iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
				    bdma_chan->id].vector, (void *)bdma_chan);
		free_irq(priv->msix[TSI721_VECT_DMA0_INT +
				    bdma_chan->id].vector, (void *)bdma_chan);
	}
#endif /* CONFIG_PCI_MSI */

	/* Free space allocated for DMA descriptors */
	dma_free_coherent(bdma_chan->dchan.device->dev,
			  (bdma_chan->bd_num + 1) * sizeof(struct tsi721_dma_desc),
			  bdma_chan->bd_base, bdma_chan->bd_phys);
	bdma_chan->bd_base = NULL;

	/* Free space allocated for status FIFO */
	dma_free_coherent(bdma_chan->dchan.device->dev,
			  bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
			  bdma_chan->sts_base, bdma_chan->sts_phys);
	bdma_chan->sts_base = NULL;
	return 0;
}

static void
tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
{
	if (enable) {
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);
		ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		/* Enable BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INTE);
	} else {
		/* Disable BDMA channel interrupts */
		iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
		/* Clear pending BDMA channel interrupts */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);
	}
}

static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
{
	u32 sts;

	sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
	return ((sts & TSI721_DMAC_STS_RUN) == 0);
}

void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
{
	/* Disable BDMA channel interrupts */
	iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
}

#ifdef CONFIG_PCI_MSI
/**
 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
 * @irq: Linux interrupt number
 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
 *
 * Handles BDMA channel interrupts signaled using MSI-X.
 *
 * Returns: %IRQ_HANDLED
 */
static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
{
	struct tsi721_bdma_chan *bdma_chan = ptr;

	if (bdma_chan->active)
		tasklet_hi_schedule(&bdma_chan->tasklet);
	return IRQ_HANDLED;
}
#endif /* CONFIG_PCI_MSI */

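/*
 * A channel is started (or resumed) by advancing the descriptor write
 * count register: hardware then processes buffer descriptors until its
 * read count catches up, as implied by the wr_count/wr_count_next
 * bookkeeping below. The ioread32() after the write is the usual
 * read-back that flushes the posted PCIe write.
 */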
/* Must be called with the spinlock held */
static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
{
	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start non-idle channel",
			bdma_chan->id);
		return;
	}

	if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d Attempt to start DMA with no BDs ready %d",
			bdma_chan->id, task_pid_nr(current));
		return;
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d (wrc=%d) %d",
		  bdma_chan->id, bdma_chan->wr_count_next,
		  task_pid_nr(current));

	iowrite32(bdma_chan->wr_count_next,
		  bdma_chan->regs + TSI721_DMAC_DWRCNT);
	ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);

	bdma_chan->wr_count = bdma_chan->wr_count_next;
}

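/*
 * Fill in the static part of a DTYPE1 buffer descriptor: request type,
 * destination ID, RapidIO address (stored right-shifted by two, with
 * the two low address bits kept in the byte-count word) and the local
 * DMA buffer address. The byte count itself is set later by
 * tsi721_desc_fill_end(), after adjacent SG entries have been merged.
 */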
static int
tsi721_desc_fill_init(struct tsi721_tx_desc *desc,
		      struct tsi721_dma_desc *bd_ptr,
		      struct scatterlist *sg, u32 sys_size)
{
	u64 rio_addr;

	if (!bd_ptr)
		return -EINVAL;

	/* Initialize DMA descriptor */
	bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
				      (desc->rtype << 19) | desc->destid);
	bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
				     (sys_size << 26));
	rio_addr = (desc->rio_addr >> 2) |
		   ((u64)(desc->rio_addr_u & 0x3) << 62);
	bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
	bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
	bd_ptr->t1.bufptr_lo = cpu_to_le32(
		(u64)sg_dma_address(sg) & 0xffffffff);
	bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
	bd_ptr->t1.s_dist = 0;
	bd_ptr->t1.s_size = 0;

	return 0;
}

static int
tsi721_desc_fill_end(struct tsi721_dma_desc *bd_ptr, u32 bcount, bool interrupt)
{
	if (!bd_ptr)
		return -EINVAL;

	/* Update DMA descriptor */
	if (interrupt)
		bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
	bd_ptr->bcount |= cpu_to_le32(bcount & TSI721_DMAD_BCOUNT1);

	return 0;
}

static void tsi721_dma_tx_err(struct tsi721_bdma_chan *bdma_chan,
			      struct tsi721_tx_desc *desc)
{
	struct dma_async_tx_descriptor *txd = &desc->txd;
	dma_async_tx_callback callback = txd->callback;
	void *param = txd->callback_param;

	list_move(&desc->desc_node, &bdma_chan->free_list);

	if (callback)
		callback(param);
}

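/*
 * Drain the descriptor status FIFO. Hardware writes completion status
 * in blocks of eight 64-bit words; a non-zero word marks a valid
 * entry. Consumed entries are zeroed and the software read pointer
 * (DSRP) is advanced, wrapping at sts_size.
 */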
static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
{
	u32 srd_ptr;
	u64 *sts_ptr;
	int i, j;

	/* Check and clear descriptor status FIFO entries */
	srd_ptr = bdma_chan->sts_rdptr;
	sts_ptr = bdma_chan->sts_base;
	j = srd_ptr * 8;
	while (sts_ptr[j]) {
		for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
			sts_ptr[j] = 0;

		++srd_ptr;
		srd_ptr %= bdma_chan->sts_size;
		j = srd_ptr * 8;
	}

	iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
	bdma_chan->sts_rdptr = srd_ptr;
}

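/*
 * Translate the transaction's scatterlist into hardware buffer
 * descriptors. Physically contiguous SG entries are merged into one
 * descriptor while the combined length fits in TSI721_BDMA_MAX_BCOUNT.
 * If the BD ring fills up before the scatterlist is exhausted,
 * desc->sg/sg_len are updated so that the remainder can be submitted
 * from the completion tasklet.
 */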
/* Must be called with the channel spinlock held */
static int tsi721_submit_sg(struct tsi721_tx_desc *desc)
{
	struct dma_chan *dchan = desc->txd.chan;
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	u32 sys_size;
	u64 rio_addr;
	dma_addr_t next_addr;
	u32 bcount;
	struct scatterlist *sg;
	unsigned int i;
	int err = 0;
	struct tsi721_dma_desc *bd_ptr = NULL;
	u32 idx, rd_idx;
	u32 add_count = 0;
	struct device *ch_dev = &dchan->dev->device;

	if (!tsi721_dma_is_idle(bdma_chan)) {
		tsi_err(ch_dev, "DMAC%d ERR: Attempt to use non-idle channel",
			bdma_chan->id);
		return -EIO;
	}

	/*
	 * Fill DMA channel's hardware buffer descriptors.
	 * (NOTE: RapidIO destination address is limited to 64 bits for now)
	 */
	rio_addr = desc->rio_addr;
	next_addr = -1;
	bcount = 0;
	sys_size = dma_to_mport(dchan->device)->sys_size;

	rd_idx = ioread32(bdma_chan->regs + TSI721_DMAC_DRDCNT);
	rd_idx %= (bdma_chan->bd_num + 1);

	idx = bdma_chan->wr_count_next % (bdma_chan->bd_num + 1);
	if (idx == bdma_chan->bd_num) {
		/* wrap around link descriptor */
		idx = 0;
		add_count++;
	}

	tsi_debug(DMA, ch_dev, "DMAC%d BD ring status: rdi=%d wri=%d",
		  bdma_chan->id, rd_idx, idx);

	for_each_sg(desc->sg, sg, desc->sg_len, i) {

		tsi_debug(DMAV, ch_dev, "DMAC%d sg%d/%d addr: 0x%llx len: %d",
			  bdma_chan->id, i, desc->sg_len,
			  (unsigned long long)sg_dma_address(sg), sg_dma_len(sg));

		if (sg_dma_len(sg) > TSI721_BDMA_MAX_BCOUNT) {
			tsi_err(ch_dev, "DMAC%d SG entry %d is too large",
				bdma_chan->id, i);
			err = -EINVAL;
			break;
		}

		/*
		 * If this sg entry forms contiguous block with previous one,
		 * try to merge it into existing DMA descriptor
		 */
		if (next_addr == sg_dma_address(sg) &&
		    bcount + sg_dma_len(sg) <= TSI721_BDMA_MAX_BCOUNT) {
			/* Adjust byte count of the descriptor */
			bcount += sg_dma_len(sg);
			goto entry_done;
		} else if (next_addr != -1) {
			/* Finalize descriptor using total byte count value */
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev, "DMAC%d prev desc final len: %d",
				  bdma_chan->id, bcount);
		}

		desc->rio_addr = rio_addr;

		if (i && idx == rd_idx) {
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d HW descriptor ring is full @ %d",
				  bdma_chan->id, i);
			desc->sg = sg;
			desc->sg_len -= i;
			break;
		}

		bd_ptr = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[idx];
		err = tsi721_desc_fill_init(desc, bd_ptr, sg, sys_size);
		if (err) {
			tsi_err(ch_dev, "Failed to build desc: err=%d", err);
			break;
		}

		tsi_debug(DMAV, ch_dev, "DMAC%d bd_ptr = %p did=%d raddr=0x%llx",
			  bdma_chan->id, bd_ptr, desc->destid, desc->rio_addr);

		next_addr = sg_dma_address(sg);
		bcount = sg_dma_len(sg);

		add_count++;
		if (++idx == bdma_chan->bd_num) {
			/* wrap around link descriptor */
			idx = 0;
			add_count++;
		}

entry_done:
		if (sg_is_last(sg)) {
			tsi721_desc_fill_end(bd_ptr, bcount, 0);
			tsi_debug(DMAV, ch_dev,
				  "DMAC%d last desc final len: %d",
				  bdma_chan->id, bcount);
			desc->sg_len = 0;
		} else {
			rio_addr += sg_dma_len(sg);
			next_addr += sg_dma_len(sg);
		}
	}

	if (!err)
		bdma_chan->wr_count_next += add_count;

	return err;
}

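/*
 * Start the next piece of work: either resume the partially submitted
 * transaction passed in by the caller, or, if nothing is in flight,
 * dequeue the first pending transaction. Called with the channel
 * spinlock held.
 */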
static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan,
				struct tsi721_tx_desc *desc)
{
	int err;

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d", bdma_chan->id);

	if (!tsi721_dma_is_idle(bdma_chan))
		return;

	/*
	 * If there is no data transfer in progress, fetch new descriptor from
	 * the pending queue.
	 */
	if (!desc && !bdma_chan->active_tx && !list_empty(&bdma_chan->queue)) {
		desc = list_first_entry(&bdma_chan->queue,
					struct tsi721_tx_desc, desc_node);
		list_del_init((&desc->desc_node));
		bdma_chan->active_tx = desc;
	}

	if (desc) {
		err = tsi721_submit_sg(desc);
		if (!err)
			tsi721_start_dma(bdma_chan);
		else {
			tsi721_dma_tx_err(bdma_chan, desc);
			tsi_debug(DMA, &bdma_chan->dchan.dev->device,
				  "DMAC%d ERR: tsi721_submit_sg failed with err=%d",
				  bdma_chan->id, err);
		}
	}

	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d Exit",
		  bdma_chan->id);
}

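/*
 * Per-channel completion tasklet. On an ERR interrupt with ABORT set,
 * the channel is re-initialized (same register sequence as in
 * tsi721_bdma_ch_init()) and the active transaction completes with
 * DMA_ERROR; on DONE/IOFDONE the active transaction either completes
 * or has its remaining SG entries submitted. Channel interrupts are
 * re-enabled on exit.
 */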
static void tsi721_dma_tasklet(unsigned long data)
{
	struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
	u32 dmac_int, dmac_sts;

	dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
	tsi_debug(DMA, &bdma_chan->dchan.dev->device, "DMAC%d_INT = 0x%x",
		  bdma_chan->id, dmac_int);
	/* Clear channel interrupts */
	iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);

	if (dmac_int & TSI721_DMAC_INT_ERR) {
		int i = 10000;
		struct tsi721_tx_desc *desc;

		desc = bdma_chan->active_tx;
		dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d_STS = 0x%x did=%d raddr=0x%llx",
			bdma_chan->id, dmac_sts, desc->destid, desc->rio_addr);

		/* Re-initialize DMA channel if possible */

		if ((dmac_sts & TSI721_DMAC_STS_ABORT) == 0)
			goto err_out;

		tsi721_clr_stat(bdma_chan);

		spin_lock(&bdma_chan->lock);

		/* Put DMA channel into init state */
		iowrite32(TSI721_DMAC_CTL_INIT,
			  bdma_chan->regs + TSI721_DMAC_CTL);
		do {
			udelay(1);
			dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
			i--;
		} while ((dmac_sts & TSI721_DMAC_STS_ABORT) && i);

		if (dmac_sts & TSI721_DMAC_STS_ABORT) {
			tsi_err(&bdma_chan->dchan.dev->device,
				"Failed to re-initiate DMAC%d", bdma_chan->id);
			spin_unlock(&bdma_chan->lock);
			goto err_out;
		}

		/* Setup DMA descriptor pointers */
		iowrite32(((u64)bdma_chan->bd_phys >> 32),
			  bdma_chan->regs + TSI721_DMAC_DPTRH);
		iowrite32(((u64)bdma_chan->bd_phys & TSI721_DMAC_DPTRL_MASK),
			  bdma_chan->regs + TSI721_DMAC_DPTRL);

		/* Setup descriptor status FIFO */
		iowrite32(((u64)bdma_chan->sts_phys >> 32),
			  bdma_chan->regs + TSI721_DMAC_DSBH);
		iowrite32(((u64)bdma_chan->sts_phys & TSI721_DMAC_DSBL_MASK),
			  bdma_chan->regs + TSI721_DMAC_DSBL);
		iowrite32(TSI721_DMAC_DSSZ_SIZE(bdma_chan->sts_size),
			  bdma_chan->regs + TSI721_DMAC_DSSZ);

		/* Clear interrupt bits */
		iowrite32(TSI721_DMAC_INT_ALL,
			  bdma_chan->regs + TSI721_DMAC_INT);

		ioread32(bdma_chan->regs + TSI721_DMAC_INT);

		bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
		bdma_chan->sts_rdptr = 0;
		udelay(10);

		desc = bdma_chan->active_tx;
		desc->status = DMA_ERROR;
		dma_cookie_complete(&desc->txd);
		list_add(&desc->desc_node, &bdma_chan->free_list);
		bdma_chan->active_tx = NULL;
		if (bdma_chan->active)
			tsi721_advance_work(bdma_chan, NULL);
		spin_unlock(&bdma_chan->lock);
	}

	if (dmac_int & TSI721_DMAC_INT_STFULL) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d descriptor status FIFO is full",
			bdma_chan->id);
	}

	if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
		struct tsi721_tx_desc *desc;

		tsi721_clr_stat(bdma_chan);
		spin_lock(&bdma_chan->lock);
		desc = bdma_chan->active_tx;

		if (desc->sg_len == 0) {
			dma_async_tx_callback callback = NULL;
			void *param = NULL;

			desc->status = DMA_COMPLETE;
			dma_cookie_complete(&desc->txd);
			if (desc->txd.flags & DMA_PREP_INTERRUPT) {
				callback = desc->txd.callback;
				param = desc->txd.callback_param;
			}
			list_add(&desc->desc_node, &bdma_chan->free_list);
			bdma_chan->active_tx = NULL;
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan, NULL);
			spin_unlock(&bdma_chan->lock);
			if (callback)
				callback(param);
		} else {
			if (bdma_chan->active)
				tsi721_advance_work(bdma_chan,
						    bdma_chan->active_tx);
			spin_unlock(&bdma_chan->lock);
		}
	}
err_out:
	/* Re-Enable BDMA channel interrupts */
	iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
}

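/*
 * dmaengine ->tx_submit() hook: assign a cookie and append the
 * transaction to the channel's pending queue. Submission is rejected
 * if the descriptor is still linked into a list (i.e. was not freshly
 * obtained from tsi721_prep_rio_sg()) or if the channel has been shut
 * down.
 */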
static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
	dma_cookie_t cookie;

	/* Check if the descriptor is detached from any lists */
	if (!list_empty(&desc->desc_node)) {
		tsi_err(&bdma_chan->dchan.dev->device,
			"DMAC%d wrong state of descriptor %p",
			bdma_chan->id, txd);
		return -EIO;
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!bdma_chan->active) {
		spin_unlock_bh(&bdma_chan->lock);
		return -ENODEV;
	}

	cookie = dma_cookie_assign(txd);
	desc->status = DMA_IN_PROGRESS;
	list_add_tail(&desc->desc_node, &bdma_chan->queue);
	tsi721_advance_work(bdma_chan, NULL);

	spin_unlock_bh(&bdma_chan->lock);
	return cookie;
}

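/*
 * dmaengine ->device_alloc_chan_resources() hook. Hardware resources
 * (BD ring, status FIFO, MSI-X vectors) are allocated lazily, on the
 * first request for the channel; dma_txqueue_sz software transaction
 * descriptors are pre-allocated onto the free list.
 */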
static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	int i;

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (bdma_chan->bd_base)
		return dma_txqueue_sz;

	/* Initialize BDMA channel */
	if (tsi721_bdma_ch_init(bdma_chan, dma_desc_per_channel)) {
		tsi_err(&dchan->dev->device, "Unable to initialize DMAC%d",
			bdma_chan->id);
		return -ENODEV;
	}

	/* Allocate queue of transaction descriptors */
	desc = kcalloc(dma_txqueue_sz, sizeof(struct tsi721_tx_desc),
		       GFP_ATOMIC);
	if (!desc) {
		tsi721_bdma_ch_free(bdma_chan);
		return -ENOMEM;
	}

	bdma_chan->tx_desc = desc;

	for (i = 0; i < dma_txqueue_sz; i++) {
		dma_async_tx_descriptor_init(&desc[i].txd, dchan);
		desc[i].txd.tx_submit = tsi721_tx_submit;
		desc[i].txd.flags = DMA_CTRL_ACK;
		list_add(&desc[i].desc_node, &bdma_chan->free_list);
	}

	dma_cookie_init(dchan);

	bdma_chan->active = true;
	tsi721_bdma_interrupt_enable(bdma_chan, 1);

	return dma_txqueue_sz;
}

static void tsi721_sync_dma_irq(struct tsi721_bdma_chan *bdma_chan)
{
	struct tsi721_device *priv = to_tsi721(bdma_chan->dchan.device);

#ifdef CONFIG_PCI_MSI
	if (priv->flags & TSI721_USING_MSIX) {
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_DONE +
					   bdma_chan->id].vector);
		synchronize_irq(priv->msix[TSI721_VECT_DMA0_INT +
					   bdma_chan->id].vector);
	} else
#endif
	synchronize_irq(priv->pdev->irq);
}

static void tsi721_free_chan_resources(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	if (!bdma_chan->bd_base)
		return;

	tsi721_bdma_interrupt_enable(bdma_chan, 0);
	bdma_chan->active = false;
	tsi721_sync_dma_irq(bdma_chan);
	tasklet_kill(&bdma_chan->tasklet);
	INIT_LIST_HEAD(&bdma_chan->free_list);
	kfree(bdma_chan->tx_desc);
	tsi721_bdma_ch_free(bdma_chan);
}

static
enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
				 struct dma_tx_state *txstate)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	enum dma_status status;

	spin_lock_bh(&bdma_chan->lock);
	status = dma_cookie_status(dchan, cookie, txstate);
	spin_unlock_bh(&bdma_chan->lock);
	return status;
}

static void tsi721_issue_pending(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);
	if (tsi721_dma_is_idle(bdma_chan) && bdma_chan->active)
		tsi721_advance_work(bdma_chan, NULL);
	spin_unlock_bh(&bdma_chan->lock);
}

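/*
 * dmaengine ->device_prep_slave_sg() hook. The RapidIO-specific
 * transfer parameters (destination ID, RapidIO address, write type)
 * arrive through the opaque context pointer as a struct rio_dma_ext
 * (presumably set up by rio_dma_prep_xfer() in the RapidIO core).
 * DMA_DEV_TO_MEM maps to an NREAD request, DMA_MEM_TO_DEV to one of
 * the NWRITE variants.
 */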
static
struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
			struct scatterlist *sgl, unsigned int sg_len,
			enum dma_transfer_direction dir, unsigned long flags,
			void *tinfo)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc;
	struct rio_dma_ext *rext = tinfo;
	enum dma_rtype rtype;
	struct dma_async_tx_descriptor *txd = NULL;

	if (!sgl || !sg_len) {
		tsi_err(&dchan->dev->device, "DMAC%d No SG list",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d %s", bdma_chan->id,
		  (dir == DMA_DEV_TO_MEM) ? "READ" : "WRITE");

	if (dir == DMA_DEV_TO_MEM)
		rtype = NREAD;
	else if (dir == DMA_MEM_TO_DEV) {
		switch (rext->wr_type) {
		case RDW_ALL_NWRITE:
			rtype = ALL_NWRITE;
			break;
		case RDW_ALL_NWRITE_R:
			rtype = ALL_NWRITE_R;
			break;
		case RDW_LAST_NWRITE_R:
		default:
			rtype = LAST_NWRITE_R;
			break;
		}
	} else {
		tsi_err(&dchan->dev->device,
			"DMAC%d Unsupported DMA direction option",
			bdma_chan->id);
		return ERR_PTR(-EINVAL);
	}

	spin_lock_bh(&bdma_chan->lock);

	if (!list_empty(&bdma_chan->free_list)) {
		desc = list_first_entry(&bdma_chan->free_list,
					struct tsi721_tx_desc, desc_node);
		list_del_init(&desc->desc_node);
		desc->destid = rext->destid;
		desc->rio_addr = rext->rio_addr;
		desc->rio_addr_u = 0;
		desc->rtype = rtype;
		desc->sg_len = sg_len;
		desc->sg = sgl;
		txd = &desc->txd;
		txd->flags = flags;
	}

	spin_unlock_bh(&bdma_chan->lock);

	if (!txd) {
		tsi_debug(DMA, &dchan->dev->device,
			  "DMAC%d free TXD is not available", bdma_chan->id);
		return ERR_PTR(-EBUSY);
	}

	return txd;
}

static int tsi721_terminate_all(struct dma_chan *dchan)
{
	struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
	struct tsi721_tx_desc *desc, *_d;
	LIST_HEAD(list);

	tsi_debug(DMA, &dchan->dev->device, "DMAC%d", bdma_chan->id);

	spin_lock_bh(&bdma_chan->lock);

	bdma_chan->active = false;

	while (!tsi721_dma_is_idle(bdma_chan)) {
		udelay(5);
#if (0)
		/* make sure to stop the transfer */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		do {
			dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
		} while ((dmac_int & TSI721_DMAC_INT_SUSP) == 0);
#endif
	}

	if (bdma_chan->active_tx)
		list_add(&bdma_chan->active_tx->desc_node, &list);
	list_splice_init(&bdma_chan->queue, &list);

	list_for_each_entry_safe(desc, _d, &list, desc_node)
		tsi721_dma_tx_err(bdma_chan, desc);

	spin_unlock_bh(&bdma_chan->lock);

	return 0;
}

static void tsi721_dma_stop(struct tsi721_bdma_chan *bdma_chan)
{
	if (!bdma_chan->active)
		return;
	spin_lock_bh(&bdma_chan->lock);
	if (!tsi721_dma_is_idle(bdma_chan)) {
		int timeout = 100000;

		/* stop the transfer in progress */
		iowrite32(TSI721_DMAC_CTL_SUSP,
			  bdma_chan->regs + TSI721_DMAC_CTL);

		/* Wait until DMA channel stops */
		while (!tsi721_dma_is_idle(bdma_chan) && --timeout)
			udelay(1);
	}

	spin_unlock_bh(&bdma_chan->lock);
}

void tsi721_dma_stop_all(struct tsi721_device *priv)
{
	int i;

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		if ((i != TSI721_DMACH_MAINT) && (dma_sel & (1 << i)))
			tsi721_dma_stop(&priv->bdma[i]);
	}
}

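/*
 * Register this mport's dmaengine device. Every BDMA channel except
 * the one reserved for maintenance transactions (TSI721_DMACH_MAINT),
 * further filtered by the dma_sel mask, is exposed as a
 * DMA_SLAVE/DMA_PRIVATE channel.
 */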
int tsi721_register_dma(struct tsi721_device *priv)
{
	int i;
	int nr_channels = 0;
	int err;
	struct rio_mport *mport = &priv->mport;

	INIT_LIST_HEAD(&mport->dma.channels);

	for (i = 0; i < TSI721_DMA_MAXCH; i++) {
		struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];

		if ((i == TSI721_DMACH_MAINT) || (dma_sel & (1 << i)) == 0)
			continue;

		bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);

		bdma_chan->dchan.device = &mport->dma;
		bdma_chan->dchan.cookie = 1;
		bdma_chan->dchan.chan_id = i;
		bdma_chan->id = i;
		bdma_chan->active = false;

		spin_lock_init(&bdma_chan->lock);

		bdma_chan->active_tx = NULL;
		INIT_LIST_HEAD(&bdma_chan->queue);
		INIT_LIST_HEAD(&bdma_chan->free_list);

		tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
			     (unsigned long)bdma_chan);
		list_add_tail(&bdma_chan->dchan.device_node,
			      &mport->dma.channels);
		nr_channels++;
	}

	mport->dma.chancnt = nr_channels;
	dma_cap_zero(mport->dma.cap_mask);
	dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
	dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);

	mport->dma.dev = &priv->pdev->dev;
	mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
	mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
	mport->dma.device_tx_status = tsi721_tx_status;
	mport->dma.device_issue_pending = tsi721_issue_pending;
	mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
	mport->dma.device_terminate_all = tsi721_terminate_all;

	err = dma_async_device_register(&mport->dma);
	if (err)
		tsi_err(&priv->pdev->dev, "Failed to register DMA device");

	return err;
}

void tsi721_unregister_dma(struct tsi721_device *priv)
{
	struct rio_mport *mport = &priv->mport;
	struct dma_chan *chan, *_c;
	struct tsi721_bdma_chan *bdma_chan;

	tsi721_dma_stop_all(priv);
	dma_async_device_unregister(&mport->dma);

	list_for_each_entry_safe(chan, _c, &mport->dma.channels,
				 device_node) {
		bdma_chan = to_tsi721_chan(chan);
		if (bdma_chan->active) {
			tsi721_bdma_interrupt_enable(bdma_chan, 0);
			bdma_chan->active = false;
			tsi721_sync_dma_irq(bdma_chan);
			tasklet_kill(&bdma_chan->tasklet);
			INIT_LIST_HEAD(&bdma_chan->free_list);
			kfree(bdma_chan->tx_desc);
			tsi721_bdma_ch_free(bdma_chan);
		}

		list_del(&chan->device_node);
	}
}