/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	/* Move the descriptor from the allocated list to the submitted list */
	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);

/*
 * Look up an issued descriptor by its cookie.  The caller must hold
 * vc->lock, as this walks the issued list.
 */
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd;
	dma_async_tx_callback cb = NULL;
	void *cb_data = NULL;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;
	}
	spin_unlock_irq(&vc->lock);

	/* A cyclic descriptor is never freed here; just run its callback */
	if (cb)
		cb(cb_data);

	while (!list_empty(&head)) {
		vd = list_first_entry(&head, struct virt_dma_desc, node);
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;

		list_del(&vd->node);
		/* Reuse-marked descriptors go back onto the allocated list */
		if (dmaengine_desc_test_reuse(&vd->tx))
			list_add(&vd->node, &vc->desc_allocated);
		else
			vc->desc_free(vd);

		if (cb)
			cb(cb_data);
	}
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct virt_dma_desc *vd = list_first_entry(head,
			struct virt_dma_desc, node);

		if (dmaengine_desc_test_reuse(&vd->tx)) {
			list_move_tail(&vd->node, &vc->desc_allocated);
		} else {
			dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
			list_del(&vd->node);
			vc->desc_free(vd);
		}
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");
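
/*
 * Usage sketch: roughly how a driver might wire a hardware channel into the
 * helpers above, together with the inline helpers declared in virt-dma.h
 * (vchan_tx_prep(), vchan_issue_pending(), vchan_next_desc() and
 * vchan_cookie_complete()).  The foo_* names and foo_start_next() below are
 * hypothetical stand-ins for a real driver's types and callbacks:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;	// embedded virtual descriptor
 *		// ... hardware-specific fields ...
 *	};
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;	// embedded virtual channel
 *		struct foo_desc *active;
 *	};
 *
 *	// .device_prep_* callback: wrap the descriptor and hand it to the core
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 *			size_t len, unsigned long flags)
 *	{
 *		struct foo_chan *fc = container_of(to_virt_chan(chan),
 *						   struct foo_chan, vc);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		// ... fill in hardware-specific fields from dst/src/len ...
 *		return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *	}
 *
 *	// .device_issue_pending callback: kick the hardware if it is idle
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = container_of(to_virt_chan(chan),
 *						   struct foo_chan, vc);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (vchan_issue_pending(&fc->vc) && !fc->active)
 *			foo_start_next(fc);	// pulls work via vchan_next_desc()
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *	}
 *
 *	// In the interrupt handler, with fc->vc.lock held, report completion;
 *	// this schedules vchan_complete() above, which runs the client
 *	// callback and then frees or recycles the descriptor:
 *	vchan_cookie_complete(&fc->active->vd);
 */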