xref: /linux/drivers/dma/virt-dma.c (revision 0c8a32eed1625a65798286fb73fea8710a908545)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);
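
/*
 * Example (not part of the original file): drivers do not call
 * vchan_tx_submit() directly; vchan_tx_prep() installs it as the
 * descriptor's ->tx_submit callback, so dmaengine_submit() ends up here.
 * A minimal sketch of a driver prep routine, assuming a hypothetical
 * "foo" driver whose descriptor embeds a struct virt_dma_desc:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;	// must be embedded
 *		dma_addr_t src, dst;
 *		size_t len;
 *	};
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *		// Queues d on desc_allocated and sets ->tx_submit to
 *		// vchan_tx_submit(), which moves it to desc_submitted.
 *		return vchan_tx_prep(vc, &d->vd, flags);
 *	}
 */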

/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way to release it is to clear the DMA_CTRL_REUSE flag and submit the
 * transfer one last time.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);
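
/*
 * Example (not part of the original file): clients normally reach
 * vchan_tx_desc_free() through dmaengine_desc_free(), which invokes the
 * descriptor's ->desc_free callback set up by vchan_tx_prep().  A rough
 * client-side sketch for a reusable descriptor:
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *	if (!txd || dmaengine_desc_set_reuse(txd))
 *		goto no_reuse;			// channel can't reuse descriptors
 *	// ...submit and reuse txd as many times as needed...
 *	dmaengine_desc_free(txd);		// lands in vchan_tx_desc_free()
 */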

struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);
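
/*
 * Example (not part of the original file): vchan_find_desc() is
 * typically used from a driver's device_tx_status callback to report the
 * residue of a transfer that has not started yet.  A rough sketch, with
 * the foo_* names (including foo_desc_bytes()) purely hypothetical:
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *			dma_cookie_t cookie, struct dma_tx_state *txstate)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct virt_dma_desc *vd;
 *		enum dma_status ret;
 *		unsigned long flags;
 *
 *		ret = dma_cookie_status(chan, cookie, txstate);
 *		if (ret == DMA_COMPLETE || !txstate)
 *			return ret;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		vd = vchan_find_desc(vc, cookie);
 *		if (vd)
 *			// still on desc_issued: nothing transferred yet
 *			dma_set_residue(txstate, foo_desc_bytes(vd));
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		return ret;
 *	}
 */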

/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(struct tasklet_struct *t)
{
	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		vchan_vdesc_fini(vd);
	}
}
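
/*
 * Example (not part of the original file): the tasklet above is fed from
 * the driver's interrupt handler.  Holding vc->lock, the driver calls
 * vchan_cookie_complete() on a finished descriptor (queueing it on
 * desc_completed and scheduling the tasklet), or vchan_cyclic_callback()
 * once per period of a cyclic transfer.  A sketch with hypothetical
 * foo_* names:
 *
 *	static irqreturn_t foo_dma_irq(int irq, void *data)
 *	{
 *		struct foo_chan *fc = data;	// hypothetical driver channel
 *
 *		spin_lock(&fc->vc.lock);
 *		if (fc->cur && fc->cur->cyclic) {
 *			vchan_cyclic_callback(&fc->cur->vd);
 *		} else if (fc->cur) {
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *			foo_start_next(fc);	// hypothetical: program next vd
 *		}
 *		spin_unlock(&fc->vc.lock);
 *
 *		return IRQ_HANDLED;
 *	}
 */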

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		list_del(&vd->node);
		vchan_vdesc_fini(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
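
/*
 * Example (not part of the original file): the usual caller of
 * vchan_dma_desc_free_list() is a driver's device_terminate_all
 * callback, which collects every descriptor still owned by the channel
 * under the lock and frees the whole list once the lock is dropped.
 * Roughly (foo_stop_hardware() is hypothetical):
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		foo_stop_hardware(chan);	// hypothetical: halt the channel
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *
 *		return 0;
 *	}
 */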

void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);
	INIT_LIST_HEAD(&vc->desc_terminated);

	tasklet_setup(&vc->task, vchan_complete);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);
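
/*
 * Example (not part of the original file): vchan_init() is called once
 * per channel at probe time, together with setting vc->desc_free to the
 * driver's release function, before dma_async_device_register().  A
 * sketch with hypothetical foo_* names:
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	// in probe, for each channel:
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fdev->ddev);
 *	// ...after all channels are set up:
 *	ret = dma_async_device_register(&fdev->ddev);
 */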

MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");