/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

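/* Convert a dmaengine tx descriptor back to its containing virt_dma_desc. */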
static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

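/**
 * vchan_tx_submit - dmaengine tx_submit callback for virtual channels
 * @tx: transaction descriptor being submitted
 *
 * Assign a cookie to the descriptor and move it onto the channel's
 * submitted list.  The driver's issue_pending callback (typically via
 * vchan_issue_pending() from virt-dma.h) later moves it to the issued
 * list.  Drivers normally install this handler through vchan_tx_prep().
 *
 * Returns the assigned cookie.
 */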
dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_add_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);

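/**
 * vchan_find_desc - find an issued descriptor by cookie
 * @vc: virtual DMA channel to search
 * @cookie: cookie identifying the descriptor
 *
 * Walk the issued list and return the descriptor whose cookie matches,
 * or NULL if no such descriptor has been issued.  The caller must hold
 * vc->lock.
 */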
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);

/*
 * This tasklet handles the completion of DMA descriptors: it runs the
 * period callback of a cyclic transfer, if one is pending, and then
 * invokes the callback of each completed descriptor and frees it.
 */
static void vchan_complete(unsigned long arg)
{
	struct virt_dma_chan *vc = (struct virt_dma_chan *)arg;
	struct virt_dma_desc *vd;
	dma_async_tx_callback cb = NULL;
	void *cb_data = NULL;
	LIST_HEAD(head);

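	/*
	 * Splice the completed descriptors onto a private list and pick up
	 * the pending cyclic callback while holding the channel lock.
	 */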
	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;
	}
	spin_unlock_irq(&vc->lock);

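	/* Run the cyclic period callback, if any, without the lock held. */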
	if (cb)
		cb(cb_data);

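	/* Free each completed descriptor and invoke its callback. */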
	while (!list_empty(&head)) {
		vd = list_first_entry(&head, struct virt_dma_desc, node);
		cb = vd->tx.callback;
		cb_data = vd->tx.callback_param;

		list_del(&vd->node);

		vc->desc_free(vd);

		if (cb)
			cb(cb_data);
	}
}

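/**
 * vchan_dma_desc_free_list - free a list of descriptors
 * @vc: virtual DMA channel that owns the descriptors
 * @head: list of descriptors to free
 *
 * Remove each descriptor from @head and release it through the
 * channel's desc_free callback.
 */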
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	while (!list_empty(head)) {
		struct virt_dma_desc *vd = list_first_entry(head,
			struct virt_dma_desc, node);
		list_del(&vd->node);
		dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
		vc->desc_free(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);

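/**
 * vchan_init - initialise a virtual DMA channel
 * @vc: virtual DMA channel to initialise
 * @dmadev: DMA device the channel belongs to
 *
 * Set up the cookie state, lock, descriptor lists and completion
 * tasklet, then add the channel to @dmadev's channel list.  Note that
 * vc->desc_free is not set here; the driver must assign it itself.
 */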
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);

	tasklet_init(&vc->task, vchan_complete, (unsigned long)vc);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);

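/*
 * Minimal usage sketch (illustrative only: the foo_* names below are
 * hypothetical driver-side types, and vchan_tx_prep() and
 * vchan_issue_pending() are inline helpers provided by virt-dma.h, not
 * by this file):
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *	};
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	In probe(), the driver hooks up the free callback and registers
 *	the channel with its dma_device before dma_async_device_register():
 *
 *		foo->vc.desc_free = foo_desc_free;
 *		vchan_init(&foo->vc, &dma_dev);
 *
 * Descriptors prepared with vchan_tx_prep() are then submitted through
 * vchan_tx_submit() above, issued via vchan_issue_pending(), and freed
 * through foo_desc_free() once completed.
 */
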
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");