// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

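/*
 * Tear down a range of GPU virtual address space one SZ_4K page at a time
 * through the per-context pagetable ops. The loop stops early if the
 * backend fails to unmap a page, so this is best effort only.
 */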
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	if (!IS_ALIGNED(iova | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
		       iova, size, pgsize);
		return;
	}

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

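/*
 * Map a physically contiguous range into the GPU address space, again in
 * SZ_4K steps. If any page fails to map, everything mapped so far is
 * unrolled via etnaviv_context_unmap() before the error is returned.
 */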
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
		pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
		       iova, &paddr, size, pgsize);
		return -EINVAL;
	}

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

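/*
 * Map all entries of a DMA-mapped scatterlist at consecutive GPU virtual
 * addresses starting at iova. Each entry is mapped from its page-aligned
 * start (sg_dma_address() minus the in-page offset) and the total is
 * clamped to va_len. On failure the partially built range is unmapped.
 */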
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
			     u32 iova, unsigned int va_len,
			     struct sg_table *sgt, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		phys_addr_t pa = sg_dma_address(sg) - sg->offset;
		unsigned int da_len = sg_dma_len(sg) + sg->offset;
		unsigned int bytes = min_t(unsigned int, da_len, va_len);

		VERB("map[%d]: %08x %pap(%x)", i, iova, &pa, bytes);

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		va_len -= bytes;
		da += bytes;
	}

	context->flush_seq++;

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	etnaviv_context_unmap(context, iova, len);

	context->flush_seq++;
}

static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->size);
	drm_mm_remove_node(&mapping->vram_node);
}

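/*
 * Reclaim an idle mapping: unmap it from the MMU context, drop the
 * context reference and take it off the context's mapping list. The
 * caller must hold context->lock and the mapping must not be in use.
 */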
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_iommu_context *context = mapping->context;

	lockdep_assert_held(&context->lock);
	WARN_ON(mapping->use);

	etnaviv_iommu_remove_mapping(context, mapping);
	etnaviv_iommu_context_put(mapping->context);
	mapping->context = NULL;
	list_del_init(&mapping->mmu_node);
}

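/*
 * Allocate address space for a mapping anywhere in the context's drm_mm.
 * If the insert fails with -ENOSPC, run a drm_mm eviction scan over the
 * unpinned mappings, reap the blocks the scan selected and retry with
 * DRM_MM_INSERT_EVICT until the insert succeeds or nothing more can be
 * freed.
 */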
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* Skip mappings without an allocated vram node. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, the
		 * block was not selected for eviction and can stay
		 * mapped in place.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Clear the context pointer to prevent mapping_get from
		 * finding these mappings again.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_reap_mapping(m);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

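/*
 * Allocate address space at an exact, userspace-requested VA (softpin).
 * If the range is blocked, any idle mappings inside it are reaped and
 * the insert is retried once; a mapping that is still in use makes the
 * request fail with -ENOSPC.
 */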
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node, due to an existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely because they are referenced by another
	 * context), in which case the mappings will not be cleaned up and we
	 * must reap them here to make space for the new mapping.
	 */

	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);

		if (m->use)
			return -ENOSPC;

		list_add(&m->scan_node, &scan_list);
	}

	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_reap_mapping(m);
		list_del_init(&m->scan_node);
	}

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

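/*
 * Set up the GPU-visible mapping for a GEM object: either reuse the
 * MMUv1 linear window for contiguous buffers, or carve a node out of
 * the context's address space (at the requested VA if one was given)
 * and point the pagetables at the object's backing pages.
 */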
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			mapping->context = etnaviv_iommu_context_get(context);
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

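/*
 * Drop a GEM object's mapping again. If another thread already reaped
 * the mapping while evicting, there is nothing left to unmap and we
 * only need to bail out.
 */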
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);
	etnaviv_iommu_context_put(context);
}

static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
	mutex_destroy(&context->lock);
	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

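/*
 * Map the cmdbuf suballocator region into a context, refcounted through
 * mapping->use. MMUv1 only synthesizes an iova relative to the linear
 * window, while MMUv2 gets a real read-only pagetable mapping.
 */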
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
		  struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

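/*
 * Set up (or reuse) the per-device global MMU state: detect the MMU
 * version from the GPU feature bits, allocate the shared bad-page target
 * and, for MMUv2, the PTA, then hook up the matching pagetable ops. All
 * GPU cores on one device must agree on the MMU version.
 */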
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
					       &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (!global)
		return;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}
563