// SPDX-License-Identifier: GPL-2.0+
/*
 * NVIDIA Tegra Video decoder driver
 *
 * Copyright (C) 2016-2019 GRATE-DRIVER project
 */

#include <linux/dma-buf.h>
#include <linux/iova.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/module.h>

#include "vde.h"

MODULE_IMPORT_NS(DMA_BUF);

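/*
 * Each mapped dma-buf is kept on vde->map_list as a cache entry. The entry
 * owns a reference to the dma-buf, its attachment, the sg_table and, when an
 * IOMMU domain is used, the allocated IOVA range. Entries are reference
 * counted; once the last user drops its reference the entry is either
 * released immediately or torn down later by delayed work, so a buffer that
 * is quickly re-used does not have to be re-mapped.
 */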
struct tegra_vde_cache_entry {
	enum dma_data_direction dma_dir;
	struct dma_buf_attachment *a;
	struct delayed_work dwork;
	struct tegra_vde *vde;
	struct list_head list;
	struct sg_table *sgt;
	struct iova *iova;
	unsigned int refcnt;
};

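/*
 * Drop a cache entry: unmap the IOVA range (if an IOMMU domain is in use),
 * unmap and detach the dma-buf, put the dma-buf reference and free the
 * entry. Called with vde->map_lock held; the refcount is expected to be
 * zero.
 */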
static void tegra_vde_release_entry(struct tegra_vde_cache_entry *entry)
{
	struct dma_buf *dmabuf = entry->a->dmabuf;

	WARN_ON_ONCE(entry->refcnt);

	if (entry->vde->domain)
		tegra_vde_iommu_unmap(entry->vde, entry->iova);

	dma_buf_unmap_attachment_unlocked(entry->a, entry->sgt, entry->dma_dir);
	dma_buf_detach(dmabuf, entry->a);
	dma_buf_put(dmabuf);

	list_del(&entry->list);
	kfree(entry);
}

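/*
 * Delayed-work handler that finally releases a cached mapping once it has
 * stayed unused for the whole grace period.
 */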
static void tegra_vde_delayed_unmap(struct work_struct *work)
{
	struct tegra_vde_cache_entry *entry;
	struct tegra_vde *vde;

	entry = container_of(work, struct tegra_vde_cache_entry,
			     dwork.work);
	vde = entry->vde;

	mutex_lock(&vde->map_lock);
	tegra_vde_release_entry(entry);
	mutex_unlock(&vde->map_lock);
}

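/*
 * Map a dma-buf for DMA by the decoder, reusing a cached mapping when one
 * exists. On success the attachment is returned via *ap and the DMA (or
 * IOVA) address of the mapping via *addrp.
 */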
int tegra_vde_dmabuf_cache_map(struct tegra_vde *vde,
			       struct dma_buf *dmabuf,
			       enum dma_data_direction dma_dir,
			       struct dma_buf_attachment **ap,
			       dma_addr_t *addrp)
{
	struct dma_buf_attachment *attachment;
	struct tegra_vde_cache_entry *entry;
	struct device *dev = vde->dev;
	struct sg_table *sgt;
	struct iova *iova;
	int err;

	mutex_lock(&vde->map_lock);

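	/* First see whether this dma-buf is already mapped and cached. */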
	list_for_each_entry(entry, &vde->map_list, list) {
		if (entry->a->dmabuf != dmabuf)
			continue;

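		/*
		 * If the delayed-unmap work can't be cancelled, the entry is
		 * already being torn down; skip it and set up a fresh mapping.
		 */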
		if (!cancel_delayed_work(&entry->dwork))
			continue;

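		/* Widen the cached mapping if the requested direction differs. */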
		if (entry->dma_dir != dma_dir)
			entry->dma_dir = DMA_BIDIRECTIONAL;

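		/*
		 * The cached entry already owns a reference to this dma-buf
		 * (taken when the entry was created), so drop the reference
		 * that came with this request.
		 */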
		dma_buf_put(dmabuf);

		if (vde->domain)
			*addrp = iova_dma_addr(&vde->iova, entry->iova);
		else
			*addrp = sg_dma_address(entry->sgt->sgl);

		goto ref;
	}

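	/* No usable cached entry: attach and map the dma-buf from scratch. */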
	attachment = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attachment)) {
		dev_err(dev, "Failed to attach dmabuf\n");
		err = PTR_ERR(attachment);
		goto err_unlock;
	}

	sgt = dma_buf_map_attachment_unlocked(attachment, dma_dir);
	if (IS_ERR(sgt)) {
		dev_err(dev, "Failed to get dmabufs sg_table\n");
		err = PTR_ERR(sgt);
		goto err_detach;
	}

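	/* Without an IOMMU the decoder needs a single contiguous DMA chunk. */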
	if (!vde->domain && sgt->nents > 1) {
		dev_err(dev, "Sparse DMA region is unsupported, please enable IOMMU\n");
		err = -EINVAL;
		goto err_unmap;
	}

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry) {
		err = -ENOMEM;
		goto err_unmap;
	}

	if (vde->domain) {
		err = tegra_vde_iommu_map(vde, sgt, &iova, dmabuf->size);
		if (err)
			goto err_free;

		*addrp = iova_dma_addr(&vde->iova, iova);
	} else {
		*addrp = sg_dma_address(sgt->sgl);
		iova = NULL;
	}

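	/* Insert the new entry into the cache; the first reference is taken below. */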
	INIT_DELAYED_WORK(&entry->dwork, tegra_vde_delayed_unmap);
	list_add(&entry->list, &vde->map_list);

	entry->dma_dir = dma_dir;
	entry->iova = iova;
	entry->vde = vde;
	entry->sgt = sgt;
	entry->a = attachment;
ref:
	entry->refcnt++;

	*ap = entry->a;

	mutex_unlock(&vde->map_lock);

	return 0;

err_free:
	kfree(entry);
err_unmap:
	dma_buf_unmap_attachment_unlocked(attachment, sgt, dma_dir);
err_detach:
	dma_buf_detach(dmabuf, attachment);
err_unlock:
	mutex_unlock(&vde->map_lock);

	return err;
}

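/*
 * Drop one reference to a cached mapping. When the last reference goes away
 * the entry is either released right away (release == true) or queued for
 * release after a 5 second grace period, so that it can be reused cheaply if
 * the same dma-buf is mapped again soon.
 */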
void tegra_vde_dmabuf_cache_unmap(struct tegra_vde *vde,
				  struct dma_buf_attachment *a,
				  bool release)
{
	struct tegra_vde_cache_entry *entry;

	mutex_lock(&vde->map_lock);

	list_for_each_entry(entry, &vde->map_list, list) {
		if (entry->a != a)
			continue;

		WARN_ON_ONCE(!entry->refcnt);

		if (--entry->refcnt == 0) {
			if (release)
				tegra_vde_release_entry(entry);
			else
				schedule_delayed_work(&entry->dwork, 5 * HZ);
		}
		break;
	}

	mutex_unlock(&vde->map_lock);
}

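/*
 * Release all cached mappings that are currently unused. Entries that are
 * still referenced, or whose delayed-unmap work could not be cancelled
 * because it is already running, are left alone.
 */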
void tegra_vde_dmabuf_cache_unmap_sync(struct tegra_vde *vde)
{
	struct tegra_vde_cache_entry *entry, *tmp;

	mutex_lock(&vde->map_lock);

	list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
		if (entry->refcnt)
			continue;

		if (!cancel_delayed_work(&entry->dwork))
			continue;

		tegra_vde_release_entry(entry);
	}

	mutex_unlock(&vde->map_lock);
}

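/*
 * Release every cached mapping. If an entry's delayed-unmap work is already
 * executing, the lock is dropped and the CPU yielded so the work can finish;
 * this repeats until the list is empty.
 */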
void tegra_vde_dmabuf_cache_unmap_all(struct tegra_vde *vde)
{
	struct tegra_vde_cache_entry *entry, *tmp;

	mutex_lock(&vde->map_lock);

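	/*
	 * Entries whose delayed-unmap work is already running can't be
	 * cancelled here; drop the lock and yield so the work can complete,
	 * then retry until the list is empty.
	 */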
	while (!list_empty(&vde->map_list)) {
		list_for_each_entry_safe(entry, tmp, &vde->map_list, list) {
			if (!cancel_delayed_work(&entry->dwork))
				continue;

			tegra_vde_release_entry(entry);
		}

		mutex_unlock(&vde->map_lock);
		schedule();
		mutex_lock(&vde->map_lock);
	}

	mutex_unlock(&vde->map_lock);
}