xref: /linux/drivers/gpu/drm/etnaviv/etnaviv_mmu.c (revision 80d443e8876602be2c130f79c4de81e12e2a700d)
/*
 * Copyright (C) 2015 Etnaviv Project
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "common.xml.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_iommu.h"
#include "etnaviv_mmu.h"

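/*
 * Fault handler installed on the GPU IOMMU domain; it only logs the
 * faulting address and flags for debugging.
 */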
static int etnaviv_fault_handler(struct iommu_domain *iommu, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
	return 0;
}

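/*
 * Map a scatterlist into the GPU address space starting at iova, placing
 * the entries back to back.  If mapping any entry fails, everything mapped
 * so far is unmapped again.
 */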
int etnaviv_iommu_map(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len, int prot)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i, j;
	int ret;

	if (!domain || !sgt)
		return -EINVAL;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa = sg_dma_address(sg) - sg->offset;
		size_t bytes = sg_dma_len(sg) + sg->offset;

		VERB("map[%d]: %08x %08x(%zx)", i, da, pa, bytes);

		ret = iommu_map(domain, da, pa, bytes, prot);
		if (ret)
			goto fail;

		da += bytes;
	}

	return 0;

fail:
	da = iova;

	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes = sg_dma_len(sg) + sg->offset;

		iommu_unmap(domain, da, bytes);
		da += bytes;
	}
	return ret;
}

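/*
 * Unmap a scatterlist that was previously mapped at iova, walking the
 * entries in the same order in which they were mapped.
 */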
int etnaviv_iommu_unmap(struct etnaviv_iommu *iommu, u32 iova,
		struct sg_table *sgt, unsigned len)
{
	struct iommu_domain *domain = iommu->domain;
	struct scatterlist *sg;
	unsigned int da = iova;
	int i;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes = sg_dma_len(sg) + sg->offset;
		size_t unmapped;

		unmapped = iommu_unmap(domain, da, bytes);
		if (unmapped < bytes)
			return unmapped;

		VERB("unmap[%d]: %08x(%zx)", i, da, bytes);

		BUG_ON(!PAGE_ALIGNED(bytes));

		da += bytes;
	}

	return 0;
}

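/* Unmap a GEM object from the MMU and release its drm_mm IOVA node. */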
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	etnaviv_iommu_unmap(mmu, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->base.size);
	drm_mm_remove_node(&mapping->vram_node);
}

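/*
 * Find a free IOVA range of @size bytes for @node.  If the address space is
 * exhausted, unused mappings are evicted via the drm_mm scan API and the
 * allocation is retried; the MMU is then flagged for a flush.  Must be
 * called with mmu->lock held.
 */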
static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	int ret;

	lockdep_assert_held(&mmu->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&mmu->mm, node,
			size, 0, mmu->last_iova, ~0UL,
			DRM_MM_SEARCH_DEFAULT);

		if (ret != -ENOSPC)
			break;

		/*
		 * If we did not search from the start of the MMU region,
		 * try again in case there are free slots.
		 */
		if (mmu->last_iova) {
			mmu->last_iova = 0;
			mmu->need_flush = true;
			continue;
		}

		/* Try to retire some entries */
		drm_mm_init_scan(&mmu->mm, size, 0, 0);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &mmu->mappings, mmu_node) {
			/* If this vram node has not been used, skip it. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, the
		 * block was not selected for eviction and its mapping
		 * can be left in place.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the mmu pointer to prevent mapping_get from
		 * finding this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_remove_mapping(mmu, m);
			m->mmu = NULL;
			list_del_init(&m->mmu_node);
			list_del_init(&m->scan_node);
		}

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed.  Ensure that the MMU will be flushed before the
		 * associated commit requesting this mapping, and retry the
		 * allocation one more time.
		 */
		mmu->need_flush = true;
	}

	return ret;
}

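/*
 * Map a GEM object into the GPU address space.  On MMUv1 a contiguous
 * buffer within the linear window can be addressed directly relative to
 * memory_base; in all other cases an IOVA range is allocated and the
 * object's scatterlist is mapped into it.
 */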
int etnaviv_iommu_map_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&mmu->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (mmu->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			list_add_tail(&mapping->mmu_node, &mmu->mappings);
			mutex_unlock(&mmu->lock);
			return 0;
		}
	}

	node = &mapping->vram_node;

	ret = etnaviv_iommu_find_iova(mmu, node, etnaviv_obj->base.size);
	if (ret < 0) {
		mutex_unlock(&mmu->lock);
		return ret;
	}

	mmu->last_iova = node->start + etnaviv_obj->base.size;
	mapping->iova = node->start;
	ret = etnaviv_iommu_map(mmu, node->start, sgt, etnaviv_obj->base.size,
				IOMMU_READ | IOMMU_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		mutex_unlock(&mmu->lock);
		return ret;
	}

	list_add_tail(&mapping->mmu_node, &mmu->mappings);
	mutex_unlock(&mmu->lock);

	return ret;
}

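/* Remove a GEM mapping from the GPU address space and from the MMU's list. */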
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu *mmu,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&mmu->lock);

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &mmu->mm)
		etnaviv_iommu_remove_mapping(mmu, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&mmu->lock);
}

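/*
 * Tear down an MMU context: the address space manager, the IOMMU domain
 * and the context structure itself.
 */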
void etnaviv_iommu_destroy(struct etnaviv_iommu *mmu)
{
	drm_mm_takedown(&mmu->mm);
	iommu_domain_free(mmu->domain);
	kfree(mmu);
}

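/*
 * Allocate and initialise an MMU context for the GPU, selecting the v1 or
 * v2 page table format based on the reported GPU features and covering the
 * aperture advertised by the IOMMU domain.
 */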
struct etnaviv_iommu *etnaviv_iommu_new(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version;
	struct etnaviv_iommu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	if (!(gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)) {
		mmu->domain = etnaviv_iommuv1_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		mmu->domain = etnaviv_iommuv2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!mmu->domain) {
		dev_err(gpu->dev, "Failed to allocate GPU IOMMU domain\n");
		kfree(mmu);
		return ERR_PTR(-ENOMEM);
	}

	mmu->gpu = gpu;
	mmu->version = version;
	mutex_init(&mmu->lock);
	INIT_LIST_HEAD(&mmu->mappings);

	drm_mm_init(&mmu->mm, mmu->domain->geometry.aperture_start,
		    mmu->domain->geometry.aperture_end -
		    mmu->domain->geometry.aperture_start + 1);

	iommu_set_fault_handler(mmu->domain, etnaviv_fault_handler, gpu->dev);

	return mmu;
}

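/* Reprogram the hardware MMU state, dispatching on the MMU version in use. */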
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu)
{
	if (gpu->mmu->version == ETNAVIV_IOMMU_V1)
		etnaviv_iommuv1_restore(gpu);
	else
		etnaviv_iommuv2_restore(gpu);
}

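/*
 * Return a GPU virtual address for a command buffer.  On MMUv1 the buffer
 * is addressed directly relative to the memory base; on MMUv2 it is mapped
 * read-only into the GPU address space on first use, spaced out by SZ_64K
 * so that FE prefetch does not hit stale TLB entries.  Returns 0 on
 * mapping failure.
 */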
u32 etnaviv_iommu_get_cmdbuf_va(struct etnaviv_gpu *gpu,
				struct etnaviv_cmdbuf *buf)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V1) {
		return buf->paddr - gpu->memory_base;
	} else {
		int ret;

		if (buf->vram_node.allocated)
			return (u32)buf->vram_node.start;

		mutex_lock(&mmu->lock);
		ret = etnaviv_iommu_find_iova(mmu, &buf->vram_node,
					      buf->size + SZ_64K);
		if (ret < 0) {
			mutex_unlock(&mmu->lock);
			return 0;
		}
		ret = iommu_map(mmu->domain, buf->vram_node.start, buf->paddr,
				buf->size, IOMMU_READ);
		if (ret < 0) {
			drm_mm_remove_node(&buf->vram_node);
			mutex_unlock(&mmu->lock);
			return 0;
		}
		/*
		 * At least on GC3000 the FE MMU doesn't properly flush old TLB
		 * entries. Make sure to space the command buffers out in a way
		 * that the FE MMU prefetch won't load invalid entries.
		 */
		mmu->last_iova = buf->vram_node.start + buf->size + SZ_64K;
		gpu->mmu->need_flush = true;
		mutex_unlock(&mmu->lock);

		return (u32)buf->vram_node.start;
	}
}

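/* Release the MMUv2 mapping of a command buffer, if one was set up. */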
void etnaviv_iommu_put_cmdbuf_va(struct etnaviv_gpu *gpu,
				 struct etnaviv_cmdbuf *buf)
{
	struct etnaviv_iommu *mmu = gpu->mmu;

	if (mmu->version == ETNAVIV_IOMMU_V2 && buf->vram_node.allocated) {
		mutex_lock(&mmu->lock);
		iommu_unmap(mmu->domain, buf->vram_node.start, buf->size);
		drm_mm_remove_node(&buf->vram_node);
		mutex_unlock(&mmu->lock);
	}
}

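/* Return the size of the buffer needed for etnaviv_iommu_dump(). */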
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu *iommu)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	return ops->dump_size(iommu->domain);
}

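/* Dump the MMU state into the provided buffer via the backend's dump callback. */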
void etnaviv_iommu_dump(struct etnaviv_iommu *iommu, void *buf)
{
	struct etnaviv_iommu_ops *ops;

	ops = container_of(iommu->domain->ops, struct etnaviv_iommu_ops, ops);

	ops->dump(iommu->domain, buf);
}