xref: /linux/drivers/gpu/drm/etnaviv/etnaviv_mmu.c (revision 2c1ed907520c50326b8f604907a8478b27881a2e)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2015-2018 Etnaviv Project
 */

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

#include "common.xml.h"
#include "etnaviv_cmdbuf.h"
#include "etnaviv_drv.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"

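/*
 * Unmap a virtually contiguous range in SZ_4K steps through the backend
 * page table ops, stopping early if the backend makes no progress.
 */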
static void etnaviv_context_unmap(struct etnaviv_iommu_context *context,
				 unsigned long iova, size_t size)
{
	size_t unmapped_page, unmapped = 0;
	size_t pgsize = SZ_4K;

	while (unmapped < size) {
		unmapped_page = context->global->ops->unmap(context, iova,
							    pgsize);
		if (!unmapped_page)
			break;

		iova += unmapped_page;
		unmapped += unmapped_page;
	}
}

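/*
 * Map a physically contiguous range in SZ_4K steps. On failure the
 * partially established mapping is rolled back, so callers never see a
 * half-mapped range.
 */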
static int etnaviv_context_map(struct etnaviv_iommu_context *context,
			      unsigned long iova, phys_addr_t paddr,
			      size_t size, int prot)
{
	unsigned long orig_iova = iova;
	size_t pgsize = SZ_4K;
	size_t orig_size = size;
	int ret = 0;

	while (size) {
		ret = context->global->ops->map(context, iova, paddr, pgsize,
						prot);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		size -= pgsize;
	}

	/* unroll mapping in case something went wrong */
	if (ret)
		etnaviv_context_unmap(context, orig_iova, orig_size - size);

	return ret;
}

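/*
 * Map the DMA segments of a scatterlist at consecutive device addresses
 * starting at iova. All addresses and sizes must be SZ_4K aligned; any
 * failure tears down the prefix mapped so far.
 */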
static int etnaviv_iommu_map(struct etnaviv_iommu_context *context,
			     u32 iova, unsigned int va_len,
			     struct sg_table *sgt, int prot)
{
	struct scatterlist *sg;
	unsigned int da = iova;
	unsigned int i;
	int ret;

	if (!context || !sgt)
		return -EINVAL;

	for_each_sgtable_dma_sg(sgt, sg, i) {
		phys_addr_t pa = sg_dma_address(sg);
		unsigned int da_len = sg_dma_len(sg);
		unsigned int bytes = min_t(unsigned int, da_len, va_len);

		VERB("map[%d]: %08x %pap(%x)", i, da, &pa, bytes);

		if (!IS_ALIGNED(iova | pa | bytes, SZ_4K)) {
			dev_err(context->global->dev,
				"unaligned: iova 0x%x pa %pa size 0x%x\n",
				iova, &pa, bytes);
			ret = -EINVAL;
			goto fail;
		}

		ret = etnaviv_context_map(context, da, pa, bytes, prot);
		if (ret)
			goto fail;

		va_len -= bytes;
		da += bytes;
	}

	context->flush_seq++;

	return 0;

fail:
	etnaviv_context_unmap(context, iova, da - iova);
	return ret;
}

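/*
 * Unmap a GEM backed range and bump the flush sequence, so stale TLB
 * entries are flushed before the next command stream executes.
 */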
static void etnaviv_iommu_unmap(struct etnaviv_iommu_context *context, u32 iova,
				struct sg_table *sgt, unsigned len)
{
	etnaviv_context_unmap(context, iova, len);

	context->flush_seq++;
}

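/* Unmap an object's pages and give its address space node back to the mm. */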
static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_gem_object *etnaviv_obj = mapping->object;

	lockdep_assert_held(&context->lock);

	etnaviv_iommu_unmap(context, mapping->vram_node.start,
			    etnaviv_obj->sgt, etnaviv_obj->size);
	drm_mm_remove_node(&mapping->vram_node);
}

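/*
 * Reap an idle mapping: unmap it, drop the context reference it held
 * and unlink it from the context. Caller must hold context->lock.
 */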
void etnaviv_iommu_reap_mapping(struct etnaviv_vram_mapping *mapping)
{
	struct etnaviv_iommu_context *context = mapping->context;

	lockdep_assert_held(&context->lock);
	WARN_ON(mapping->use);

	etnaviv_iommu_remove_mapping(context, mapping);
	etnaviv_iommu_context_put(mapping->context);
	mapping->context = NULL;
	list_del_init(&mapping->mmu_node);
}

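/*
 * Find address space for a new mapping. If the initial low insert fails
 * with -ENOSPC, use the drm_mm eviction scan over idle mappings, reap
 * the blocks it selects and retry in eviction mode.
 */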
static int etnaviv_iommu_find_iova(struct etnaviv_iommu_context *context,
				   struct drm_mm_node *node, size_t size)
{
	struct etnaviv_vram_mapping *free = NULL;
	enum drm_mm_insert_mode mode = DRM_MM_INSERT_LOW;
	int ret;

	lockdep_assert_held(&context->lock);

	while (1) {
		struct etnaviv_vram_mapping *m, *n;
		struct drm_mm_scan scan;
		struct list_head list;
		bool found;

		ret = drm_mm_insert_node_in_range(&context->mm, node,
						  size, 0, 0, 0, U64_MAX, mode);
		if (ret != -ENOSPC)
			break;

		/* Try to retire some entries */
		drm_mm_scan_init(&scan, &context->mm, size, 0, 0, mode);

		found = false;
		INIT_LIST_HEAD(&list);
		list_for_each_entry(free, &context->mappings, mmu_node) {
			/* If this vram node has not been used, skip it. */
			if (!free->vram_node.mm)
				continue;

			/*
			 * If the iova is pinned, then it's in-use,
			 * so we must keep its mapping.
			 */
			if (free->use)
				continue;

			list_add(&free->scan_node, &list);
			if (drm_mm_scan_add_block(&scan, &free->vram_node)) {
				found = true;
				break;
			}
		}

		if (!found) {
			/* Nothing found, clean up and fail */
			list_for_each_entry_safe(m, n, &list, scan_node)
				BUG_ON(drm_mm_scan_remove_block(&scan, &m->vram_node));
			break;
		}

		/*
		 * drm_mm does not allow any other operations while
		 * scanning, so we have to remove all blocks first.
		 * If drm_mm_scan_remove_block() returns false, we
		 * can leave the block pinned.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node)
			if (!drm_mm_scan_remove_block(&scan, &m->vram_node))
				list_del_init(&m->scan_node);

		/*
		 * Unmap the blocks which need to be reaped from the MMU.
		 * Reaping clears the mapping's context pointer, which
		 * prevents mapping_get from finding this mapping.
		 */
		list_for_each_entry_safe(m, n, &list, scan_node) {
			etnaviv_iommu_reap_mapping(m);
			list_del_init(&m->scan_node);
		}

		mode = DRM_MM_INSERT_EVICT;

		/*
		 * We removed enough mappings so that the new allocation will
		 * succeed, retry the allocation one more time.
		 */
	}

	return ret;
}

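/*
 * Insert a node at an exact, userspace requested address (softpin). If
 * the range is blocked only by idle mappings, reap them and retry; if
 * any blocking mapping is still in use, the request fails.
 */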
static int etnaviv_iommu_insert_exact(struct etnaviv_iommu_context *context,
		   struct drm_mm_node *node, size_t size, u64 va)
{
	struct etnaviv_vram_mapping *m, *n;
	struct drm_mm_node *scan_node;
	LIST_HEAD(scan_list);
	int ret;

	lockdep_assert_held(&context->lock);

	ret = drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					  va + size, DRM_MM_INSERT_LOWEST);
	if (ret != -ENOSPC)
		return ret;

	/*
	 * When we can't insert the node, due to an existing mapping blocking
	 * the address space, there are two possible reasons:
	 * 1. Userspace genuinely messed up and tried to reuse address space
	 * before the last job using this VMA has finished executing.
	 * 2. The existing buffer mappings are idle, but the buffers are not
	 * destroyed yet (likely due to being referenced by another context) in
	 * which case the mappings will not be cleaned up and we must reap them
	 * here to make space for the new mapping.
	 */

	drm_mm_for_each_node_in_range(scan_node, &context->mm, va, va + size) {
		m = container_of(scan_node, struct etnaviv_vram_mapping,
				 vram_node);

		if (m->use)
			return -ENOSPC;

		list_add(&m->scan_node, &scan_list);
	}

	list_for_each_entry_safe(m, n, &scan_list, scan_node) {
		etnaviv_iommu_reap_mapping(m);
		list_del_init(&m->scan_node);
	}

	return drm_mm_insert_node_in_range(&context->mm, node, size, 0, 0, va,
					   va + size, DRM_MM_INSERT_LOWEST);
}

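/*
 * Map a GEM object into a context, either at a fixed softpin address
 * (va != 0) or wherever address space is free. On MMUv1, a contiguous
 * buffer that fits the 2GiB linear window is used untranslated through
 * an offset from memory_base instead of going through page tables.
 */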
int etnaviv_iommu_map_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_gem_object *etnaviv_obj, u32 memory_base,
	struct etnaviv_vram_mapping *mapping, u64 va)
{
	struct sg_table *sgt = etnaviv_obj->sgt;
	struct drm_mm_node *node;
	int ret;

	lockdep_assert_held(&etnaviv_obj->lock);

	mutex_lock(&context->lock);

	/* v1 MMU can optimize single entry (contiguous) scatterlists */
	if (context->global->version == ETNAVIV_IOMMU_V1 &&
	    sgt->nents == 1 && !(etnaviv_obj->flags & ETNA_BO_FORCE_MMU)) {
		u32 iova;

		iova = sg_dma_address(sgt->sgl) - memory_base;
		if (iova < 0x80000000 - sg_dma_len(sgt->sgl)) {
			mapping->iova = iova;
			mapping->context = etnaviv_iommu_context_get(context);
			list_add_tail(&mapping->mmu_node, &context->mappings);
			ret = 0;
			goto unlock;
		}
	}

	node = &mapping->vram_node;

	if (va)
		ret = etnaviv_iommu_insert_exact(context, node, etnaviv_obj->size, va);
	else
		ret = etnaviv_iommu_find_iova(context, node, etnaviv_obj->size);
	if (ret < 0)
		goto unlock;

	mapping->iova = node->start;
	ret = etnaviv_iommu_map(context, node->start, etnaviv_obj->size, sgt,
				ETNAVIV_PROT_READ | ETNAVIV_PROT_WRITE);

	if (ret < 0) {
		drm_mm_remove_node(node);
		goto unlock;
	}

	mapping->context = etnaviv_iommu_context_get(context);
	list_add_tail(&mapping->mmu_node, &context->mappings);
unlock:
	mutex_unlock(&context->lock);

	return ret;
}

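/*
 * Undo etnaviv_iommu_map_gem(). Bails out early if another thread
 * already reaped the mapping while this one waited for the lock; the
 * MMUv1 linear window case has no vram node to remove.
 */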
void etnaviv_iommu_unmap_gem(struct etnaviv_iommu_context *context,
	struct etnaviv_vram_mapping *mapping)
{
	WARN_ON(mapping->use);

	mutex_lock(&context->lock);

	/* Bail if the mapping has been reaped by another thread */
	if (!mapping->context) {
		mutex_unlock(&context->lock);
		return;
	}

	/* If the vram node is on the mm, unmap and remove the node */
	if (mapping->vram_node.mm == &context->mm)
		etnaviv_iommu_remove_mapping(context, mapping);

	list_del(&mapping->mmu_node);
	mutex_unlock(&context->lock);
	etnaviv_iommu_context_put(context);
}

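/* Final kref release: unmap the cmdbuf suballocator and free the context. */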
static void etnaviv_iommu_context_free(struct kref *kref)
{
	struct etnaviv_iommu_context *context =
		container_of(kref, struct etnaviv_iommu_context, refcount);

	etnaviv_cmdbuf_suballoc_unmap(context, &context->cmdbuf_mapping);
	mutex_destroy(&context->lock);
	context->global->ops->free(context);
}

void etnaviv_iommu_context_put(struct etnaviv_iommu_context *context)
{
	kref_put(&context->refcount, etnaviv_iommu_context_free);
}

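/*
 * Create a new per-process IOMMU context for the given global MMU state
 * and map the shared command buffer suballocator into it. MMUv1
 * requires the cmdbuf to live inside the low 2GiB linear window.
 */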
struct etnaviv_iommu_context *
etnaviv_iommu_context_init(struct etnaviv_iommu_global *global,
			   struct etnaviv_cmdbuf_suballoc *suballoc)
{
	struct etnaviv_iommu_context *ctx;
	int ret;

	if (global->version == ETNAVIV_IOMMU_V1)
		ctx = etnaviv_iommuv1_context_alloc(global);
	else
		ctx = etnaviv_iommuv2_context_alloc(global);

	if (!ctx)
		return NULL;

	ret = etnaviv_cmdbuf_suballoc_map(suballoc, ctx, &ctx->cmdbuf_mapping,
					  global->memory_base);
	if (ret)
		goto out_free;

	if (global->version == ETNAVIV_IOMMU_V1 &&
	    ctx->cmdbuf_mapping.iova > 0x80000000) {
		dev_err(global->dev,
			"command buffer outside valid memory window\n");
		goto out_unmap;
	}

	return ctx;

out_unmap:
	etnaviv_cmdbuf_suballoc_unmap(ctx, &ctx->cmdbuf_mapping);
out_free:
	global->ops->free(ctx);
	return NULL;
}

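/* Make the given context current on the GPU via the backend restore hook. */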
void etnaviv_iommu_restore(struct etnaviv_gpu *gpu,
			   struct etnaviv_iommu_context *context)
{
	context->global->ops->restore(gpu, context);
}

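/*
 * Take a reference on the cmdbuf suballocator mapping in this context,
 * creating it on first use. MMUv1 reaches the suballoc through the
 * linear window, so only the iova is derived; other versions get a real
 * read-only page table mapping.
 */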
int etnaviv_iommu_get_suballoc_va(struct etnaviv_iommu_context *context,
				  struct etnaviv_vram_mapping *mapping,
				  u32 memory_base, dma_addr_t paddr,
				  size_t size)
{
	mutex_lock(&context->lock);

	if (mapping->use > 0) {
		mapping->use++;
		mutex_unlock(&context->lock);
		return 0;
	}

	/*
	 * For MMUv1 we don't add the suballoc region to the pagetables, as
	 * those GPUs can only work with cmdbufs accessed through the linear
	 * window. Instead we manufacture a mapping to make it look uniform
	 * to the upper layers.
	 */
	if (context->global->version == ETNAVIV_IOMMU_V1) {
		mapping->iova = paddr - memory_base;
	} else {
		struct drm_mm_node *node = &mapping->vram_node;
		int ret;

		ret = etnaviv_iommu_find_iova(context, node, size);
		if (ret < 0) {
			mutex_unlock(&context->lock);
			return ret;
		}

		mapping->iova = node->start;
		ret = etnaviv_context_map(context, node->start, paddr, size,
					  ETNAVIV_PROT_READ);
		if (ret < 0) {
			drm_mm_remove_node(node);
			mutex_unlock(&context->lock);
			return ret;
		}

		context->flush_seq++;
	}

	list_add_tail(&mapping->mmu_node, &context->mappings);
	mapping->use = 1;

	mutex_unlock(&context->lock);

	return 0;
}

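/*
 * Drop a reference on the suballocator mapping; the last put unmaps it.
 * MMUv1 never created a page table entry, so there is nothing to undo.
 */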
void etnaviv_iommu_put_suballoc_va(struct etnaviv_iommu_context *context,
		  struct etnaviv_vram_mapping *mapping)
{
	struct drm_mm_node *node = &mapping->vram_node;

	mutex_lock(&context->lock);
	mapping->use--;

	if (mapping->use > 0 || context->global->version == ETNAVIV_IOMMU_V1) {
		mutex_unlock(&context->lock);
		return;
	}

	etnaviv_context_unmap(context, node->start, node->size);
	drm_mm_remove_node(node);
	mutex_unlock(&context->lock);
}

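/* Dump helpers: size and contents come straight from the backend. */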
size_t etnaviv_iommu_dump_size(struct etnaviv_iommu_context *context)
{
	return context->global->ops->dump_size(context);
}

void etnaviv_iommu_dump(struct etnaviv_iommu_context *context, void *buf)
{
	context->global->ops->dump(context, buf);
}

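/*
 * Set up the per-device global MMU state shared by all contexts: detect
 * the MMU version from the GPU feature bits, allocate the bad page used
 * as the target for invalid page table entries and, for MMUv2, the page
 * table array (PTA). GPUs after the first just take a reference, but
 * all GPUs on one device must agree on the MMU version.
 */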
int etnaviv_iommu_global_init(struct etnaviv_gpu *gpu)
{
	enum etnaviv_iommu_version version = ETNAVIV_IOMMU_V1;
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global;
	struct device *dev = gpu->drm->dev;

	if (gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION)
		version = ETNAVIV_IOMMU_V2;

	if (priv->mmu_global) {
		if (priv->mmu_global->version != version) {
			dev_err(gpu->dev,
				"MMU version doesn't match global version\n");
			return -ENXIO;
		}

		priv->mmu_global->use++;
		return 0;
	}

	global = kzalloc(sizeof(*global), GFP_KERNEL);
	if (!global)
		return -ENOMEM;

	global->bad_page_cpu = dma_alloc_wc(dev, SZ_4K, &global->bad_page_dma,
					    GFP_KERNEL);
	if (!global->bad_page_cpu)
		goto free_global;

	memset32(global->bad_page_cpu, 0xdead55aa, SZ_4K / sizeof(u32));

	if (version == ETNAVIV_IOMMU_V2) {
		global->v2.pta_cpu = dma_alloc_wc(dev, ETNAVIV_PTA_SIZE,
					       &global->v2.pta_dma, GFP_KERNEL);
		if (!global->v2.pta_cpu)
			goto free_bad_page;
	}

	global->dev = dev;
	global->version = version;
	global->use = 1;
	mutex_init(&global->lock);

	if (version == ETNAVIV_IOMMU_V1)
		global->ops = &etnaviv_iommuv1_ops;
	else
		global->ops = &etnaviv_iommuv2_ops;

	priv->mmu_global = global;

	return 0;

free_bad_page:
	dma_free_wc(dev, SZ_4K, global->bad_page_cpu, global->bad_page_dma);
free_global:
	kfree(global);

	return -ENOMEM;
}

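/*
 * Drop this GPU's reference on the global MMU state; the last user
 * frees the DMA allocations and the state itself.
 */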
void etnaviv_iommu_global_fini(struct etnaviv_gpu *gpu)
{
	struct etnaviv_drm_private *priv = gpu->drm->dev_private;
	struct etnaviv_iommu_global *global = priv->mmu_global;

	if (!global)
		return;

	if (--global->use > 0)
		return;

	if (global->v2.pta_cpu)
		dma_free_wc(global->dev, ETNAVIV_PTA_SIZE,
			    global->v2.pta_cpu, global->v2.pta_dma);

	if (global->bad_page_cpu)
		dma_free_wc(global->dev, SZ_4K,
			    global->bad_page_cpu, global->bad_page_dma);

	mutex_destroy(&global->lock);
	kfree(global);

	priv->mmu_global = NULL;
}
559