xref: /linux/drivers/gpu/drm/etnaviv/etnaviv_mmu.c (revision 815e260a18a3af4dab59025ee99a7156c0e8b5e0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include <drm/drm_print.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

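/*
 * Walk an IOVA range in 4K steps and tear down each page through the
 * pagetable backend ops. Stops early if the backend reports that nothing
 * was unmapped, mirroring the partial-progress handling in
 * etnaviv_context_map() below.
 */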
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

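/*
 * Map a physically contiguous range at @iova, one 4K page at a time. If any
 * page fails to map, everything mapped so far is unmapped again, so the
 * pagetables are left unchanged on error.
 */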
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

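/*
 * Map an sg_table of DMA segments into the GPU address space starting at
 * @iova, consuming at most @va_len bytes of the virtual range. All addresses
 * and segment sizes must be 4K aligned; on failure any partially created
 * mapping is torn down again.
 */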
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
			     u32 iova, unsigned int va_len,
			     struct sg_table *sgt, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		phys_addr_t pa = sg_dma_address(sg);
		unsigned int da_len = sg_dma_len(sg);
		unsigned int bytes = min_t(unsigned int, da_len, va_len);

		VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);

		if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
			dev_err(context->global->dev,
				"unaligned: iova 0x%x pa %pa size 0x%x\n",
				iova, &pa, bytes);
			ret = -EINVAL;
			goto fail;
		}

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		va_len -= bytes;
		da += bytes;
	}

	context->flush_seq++;

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

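/*
 * Tear down an existing GPU mapping. Bumping flush_seq marks the pagetables
 * as dirty, so the next command stream submission on this context knows to
 * flush the MMU TLB before the stale entries can be reused.
 */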
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	etnaviv_context_unmap(context, iova, len);

	context->flush_seq++;
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->size);
	drm_mm_remove_node(&mapping->vram_node);
}

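/*
 * Reap an idle mapping: unmap it from the GPU address space, release its
 * address space node and drop the context reference. The caller must hold
 * context->lock and the mapping must not be in use by any pending job.
 */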
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_iommu_context *context = mapping->context;

	lockdep_assert_held(&context->lock);
	WARN_ON(mapping->use);

	etnaviv_iommu_remove_mapping(context, mapping);
	etnaviv_iommu_context_put(mapping->context);
	mapping->context = NULL;
	list_del_init(&mapping->mmu_node);
}

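/*
 * Find a free range of GPU virtual address space for @node. Start with a
 * simple low-to-high allocation; if the address space is full, use the
 * drm_mm eviction scan to reap enough idle (unpinned) mappings to make room,
 * then retry the allocation in eviction mode.
 */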
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip this. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Reaping clears the mapping's context pointer, so a
		 * concurrent mapping lookup can no longer find it.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_reap_mapping(m);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

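/*
 * Insert @node at the exact user-requested address @va. If the range is
 * blocked only by idle mappings of already released buffers, reap them and
 * retry; if any blocking mapping is still in use, fail with -ENOSPC.
 */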
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node due to an existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely due to being referenced by another context),
	 * in which case the mappings will not be cleaned up and we must reap
	 * them here to make space for the new mapping.
	 */

	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);

		if (m->use)
			return -ENOSPC;

		list_add(&m->scan_node, &scan_list);
	}

	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_reap_mapping(m);
		list_del_init(&m->scan_node);
	}

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

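/*
 * Create a GPU mapping for a GEM object in this context. If @va is non-zero
 * the mapping is placed at that exact softpin address, otherwise a free
 * range is allocated. On MMUv1, contiguous buffers that fit the 2 GiB linear
 * window are used directly without touching the pagetables, unless
 * ETNA_BO_FORCE_MMU is set.
 */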
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			mapping->context = etnaviv_iommu_context_get(context);
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

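/*
 * Drop a GPU mapping created by etnaviv_iommu_map_gem(). The mapping may
 * already have been reaped by a concurrent eviction, in which case there is
 * nothing left to unmap here.
 */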
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);
	etnaviv_iommu_context_put(context);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
	mutex_destroy(&context->lock);
	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

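/*
 * Allocate a new per-process address space context and map the shared
 * command buffer suballocator into it. On MMUv1 all command buffers must be
 * reachable through the linear window, so a suballocator mapping above the
 * 2 GiB limit is rejected.
 */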
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

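/*
 * Map the command buffer suballocator region into this context and return
 * its GPU address through @mapping. The mapping is use-counted: repeated
 * calls only bump the count. MMUv1 addresses the region through the linear
 * window, everything else gets a read-only pagetable mapping.
 */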
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

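/*
 * Drop one reference on a suballocator mapping. The pagetable entries are
 * only torn down once the use count reaches zero; MMUv1 mappings are purely
 * virtual (linear window offsets) and need no teardown.
 */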
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
		  struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

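/*
 * Set up the MMU state shared by all contexts on a GPU: detect the MMU
 * version from the core features, allocate the bad page that faulting
 * accesses are redirected to and, for MMUv2, the PTA. All GPUs sharing one
 * DRM device must use the same MMU version.
 */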
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
					       &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (!global)
		return;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}