xref: /linux/drivers/gpu/drm/msm/msm_gem_vma.c (revision 3f1c07fc21c68bd3bd2df9d2c9441f6485e934d9)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2016 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include "drm/drm_file.h"
8 #include "drm/msm_drm.h"
9 #include "linux/file.h"
10 #include "linux/sync_file.h"
11 
12 #include "msm_drv.h"
13 #include "msm_gem.h"
14 #include "msm_gpu.h"
15 #include "msm_mmu.h"
16 #include "msm_syncobj.h"
17 
18 #define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
19 
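/*
 * When enabled (vm_log_shift > 0), each map/unmap applied to the pgtables is
 * recorded in a power-of-two sized ring buffer of (1 << vm_log_shift)
 * entries, which is dumped when the VM is marked unusable.
 */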
20 static uint vm_log_shift = 0;
21 MODULE_PARM_DESC(vm_log_shift, "Log2 of the number of VM op log entries");
22 module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
23 
24 /**
25  * struct msm_vm_map_op - create new pgtable mapping
26  */
27 struct msm_vm_map_op {
28 	/** @iova: start address for mapping */
29 	uint64_t iova;
30 	/** @range: size of the region to map */
31 	uint64_t range;
32 	/** @offset: offset into @sgt to map */
33 	uint64_t offset;
34 	/** @sgt: pages to map, or NULL for a PRR mapping */
35 	struct sg_table *sgt;
36 	/** @prot: the mapping protection flags */
37 	int prot;
38 
39 	/**
40 	 * @queue_id: The id of the submitqueue the operation is performed
41 	 * on, or zero for (in particular) UNMAP ops triggered outside of
42 	 * a submitqueue (ie. process cleanup)
43 	 */
44 	int queue_id;
45 };
46 
47 /**
48  * struct msm_vm_unmap_op - unmap a range of pages from pgtable
49  */
50 struct msm_vm_unmap_op {
51 	/** @iova: start address for unmap */
52 	uint64_t iova;
53 	/** @range: size of region to unmap */
54 	uint64_t range;
55 
56 	/** @reason: The reason for the unmap */
57 	const char *reason;
58 
59 	/**
60 	 * @queue_id: The id of the submitqueue the operation is performed
61 	 * on, or zero for (in particular) UNMAP ops triggered outside of
62 	 * a submitqueue (ie. process cleanup)
63 	 */
64 	int queue_id;
65 };
66 
67 /**
68  * struct msm_vm_op - A MAP or UNMAP operation
69  */
70 struct msm_vm_op {
71 	/** @op: The operation type */
72 	enum {
73 		MSM_VM_OP_MAP = 1,
74 		MSM_VM_OP_UNMAP,
75 	} op;
76 	union {
77 		/** @map: Parameters used if op == MSM_VM_OP_MAP */
78 		struct msm_vm_map_op map;
79 		/** @unmap: Parameters used if op == MSM_VM_OP_UNMAP */
80 		struct msm_vm_unmap_op unmap;
81 	};
82 	/** @node: list head in msm_vm_bind_job::vm_ops */
83 	struct list_head node;
84 
85 	/**
86 	 * @obj: backing object for pages to be mapped/unmapped
87 	 *
88 	 * Async unmap ops, in particular, must hold a reference to the
89 	 * original GEM object backing the mapping that will be unmapped.
90 	 * But the same can be required in the map path, for example if
91 	 * there is not a corresponding unmap op, such as process exit.
92 	 *
93 	 * This ensures that the pages backing the mapping are not freed
94 	 * before the mapping is torn down.
95 	 */
96 	struct drm_gem_object *obj;
97 };
98 
99 /**
100  * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
101  *
102  * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
103  * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
104  * which are applied to the pgtables asynchronously.  For example a userspace
105  * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP
106  * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping.
107  */
108 struct msm_vm_bind_job {
109 	/** @base: base class for drm_sched jobs */
110 	struct drm_sched_job base;
111 	/** @vm: The VM being operated on */
112 	struct drm_gpuvm *vm;
113 	/** @fence: The fence that is signaled when job completes */
114 	struct dma_fence *fence;
115 	/** @queue: The queue that the job runs on */
116 	struct msm_gpu_submitqueue *queue;
117 	/** @prealloc: Tracking for pre-allocated MMU pgtable pages */
118 	struct msm_mmu_prealloc prealloc;
119 	/** @vm_ops: a list of struct msm_vm_op */
120 	struct list_head vm_ops;
121 	/** @bos_pinned: are the GEM objects being bound pinned? */
122 	bool bos_pinned;
123 	/** @nr_ops: the number of userspace requested ops */
124 	unsigned int nr_ops;
125 	/**
126 	 * @ops: the userspace requested ops
127 	 *
128 	 * The userspace requested ops are copied/parsed and validated
129 	 * before we start applying the updates to try to do as much up-
130 	 * front error checking as possible, to avoid the VM being in an
131 	 * undefined state due to partially executed VM_BIND.
132 	 *
133 	 * This table also serves to hold a reference to the backing GEM
134 	 * objects.
135 	 */
136 	struct msm_vm_bind_op {
137 		uint32_t op;
138 		uint32_t flags;
139 		union {
140 			struct drm_gem_object *obj;
141 			uint32_t handle;
142 		};
143 		uint64_t obj_offset;
144 		uint64_t iova;
145 		uint64_t range;
146 	} ops[];
147 };
148 
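/* Iterate the job's ops, yielding each op's backing GEM obj (skipping NULL entries) */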
149 #define job_foreach_bo(obj, _job) \
150 	for (unsigned i = 0; i < (_job)->nr_ops; i++) \
151 		if ((obj = (_job)->ops[i].obj))
152 
153 static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
154 {
155 	return container_of(job, struct msm_vm_bind_job, base);
156 }
157 
158 static void
159 msm_gem_vm_free(struct drm_gpuvm *gpuvm)
160 {
161 	struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
162 
163 	drm_mm_takedown(&vm->mm);
164 	if (vm->mmu)
165 		vm->mmu->funcs->destroy(vm->mmu);
166 	dma_fence_put(vm->last_fence);
167 	put_pid(vm->pid);
168 	kfree(vm->log);
169 	kfree(vm);
170 }
171 
172 /**
173  * msm_gem_vm_unusable() - Mark a VM as unusable
174  * @gpuvm: the VM to mark unusable
175  */
176 void
177 msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
178 {
179 	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
180 	uint32_t vm_log_len = (1 << vm->log_shift);
181 	uint32_t vm_log_mask = vm_log_len - 1;
182 	uint32_t nr_vm_logs;
183 	int first;
184 
185 	vm->unusable = true;
186 
187 	/* Bail if no log, or empty log: */
188 	if (!vm->log || !vm->log[0].op)
189 		return;
190 
191 	mutex_lock(&vm->mmu_lock);
192 
193 	/*
194 	 * log_idx is the next entry to overwrite, meaning it is the oldest, or
195 	 * first, entry (other than the special case handled below where the
196 	 * log hasn't wrapped around yet)
197 	 */
198 	first = vm->log_idx;
199 
200 	if (!vm->log[first].op) {
201 		/*
202 		 * If the next log entry has not been written yet, then only
203 		 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
204 		 * yet)
205 		 */
206 		nr_vm_logs = MAX(0, first - 1);
207 		first = 0;
208 	} else {
209 		nr_vm_logs = vm_log_len;
210 	}
211 
212 	pr_err("vm-log:\n");
213 	for (int i = 0; i < nr_vm_logs; i++) {
214 		int idx = (i + first) & vm_log_mask;
215 		struct msm_gem_vm_log_entry *e = &vm->log[idx];
216 		pr_err("  - %s:%d: 0x%016llx-0x%016llx\n",
217 		       e->op, e->queue_id, e->iova,
218 		       e->iova + e->range);
219 	}
220 
221 	mutex_unlock(&vm->mmu_lock);
222 }
223 
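/*
 * Append an entry to the VM's op log ring buffer (no-op if the log is
 * disabled).  For user managed VMs the log is serialized by mmu_lock.
 */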
224 static void
225 vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
226 {
227 	int idx;
228 
229 	if (!vm->managed)
230 		lockdep_assert_held(&vm->mmu_lock);
231 
232 	vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
233 
234 	if (!vm->log)
235 		return;
236 
237 	idx = vm->log_idx;
238 	vm->log[idx].op = op;
239 	vm->log[idx].iova = iova;
240 	vm->log[idx].range = range;
241 	vm->log[idx].queue_id = queue_id;
242 	vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
243 }
244 
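/* Apply a single unmap directly to the pgtables, logging it first */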
245 static void
246 vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
247 {
248 	const char *reason = op->reason;
249 
250 	if (!reason)
251 		reason = "unmap";
252 
253 	vm_log(vm, reason, op->iova, op->range, op->queue_id);
254 
255 	vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
256 }
257 
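/* Apply a single map directly to the pgtables, logging it first */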
258 static int
259 vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
260 {
261 	vm_log(vm, "map", op->iova, op->range, op->queue_id);
262 
263 	return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
264 				   op->range, op->prot);
265 }
266 
267 /* Actually unmap memory for the vma */
268 void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason)
269 {
270 	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
271 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
272 
273 	/* Don't do anything if the memory isn't mapped */
274 	if (!msm_vma->mapped)
275 		return;
276 
277 	/*
278 	 * The mmu_lock is only needed when preallocation is used.  But
279 	 * in that case we don't need to worry about recursion into
280 	 * shrinker
281 	 */
282 	if (!vm->managed)
283 		mutex_lock(&vm->mmu_lock);
284 
285 	vm_unmap_op(vm, &(struct msm_vm_unmap_op){
286 		.iova = vma->va.addr,
287 		.range = vma->va.range,
288 		.reason = reason,
289 	});
290 
291 	if (!vm->managed)
292 		mutex_unlock(&vm->mmu_lock);
293 
294 	msm_vma->mapped = false;
295 }
296 
297 /* Map and pin vma: */
298 int
299 msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
300 {
301 	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
302 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
303 	int ret;
304 
305 	if (GEM_WARN_ON(!vma->va.addr))
306 		return -EINVAL;
307 
308 	if (msm_vma->mapped)
309 		return 0;
310 
311 	msm_vma->mapped = true;
312 
313 	/*
314 	 * The mmu_lock is only needed when preallocation is used.  But
315 	 * in that case we don't need to worry about recursion into
316 	 * shrinker
317 	 */
318 	if (!vm->managed)
319 		mutex_lock(&vm->mmu_lock);
320 
321 	/*
322 	 * NOTE: if not using pgtable preallocation, we cannot hold
323 	 * a lock across map/unmap which is also used in the job_run()
324 	 * path, as this can cause deadlock in job_run() vs shrinker/
325 	 * reclaim.
326 	 */
327 	ret = vm_map_op(vm, &(struct msm_vm_map_op){
328 		.iova = vma->va.addr,
329 		.range = vma->va.range,
330 		.offset = vma->gem.offset,
331 		.sgt = sgt,
332 		.prot = prot,
333 	});
334 
335 	if (!vm->managed)
336 		mutex_unlock(&vm->mmu_lock);
337 
338 	if (ret)
339 		msm_vma->mapped = false;
340 
341 	return ret;
342 }
343 
344 /* Close an iova.  Warn if it is still in use */
345 void msm_gem_vma_close(struct drm_gpuva *vma)
346 {
347 	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
348 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
349 
350 	GEM_WARN_ON(msm_vma->mapped);
351 
352 	drm_gpuvm_resv_assert_held(&vm->base);
353 
354 	if (vma->gem.obj)
355 		msm_gem_assert_locked(vma->gem.obj);
356 
357 	if (vma->va.addr && vm->managed)
358 		drm_mm_remove_node(&msm_vma->node);
359 
360 	drm_gpuva_remove(vma);
361 	drm_gpuva_unlink(vma);
362 
363 	kfree(vma);
364 }
365 
366 /* Create a new vma and allocate an iova for it */
367 struct drm_gpuva *
368 msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
369 		u64 offset, u64 range_start, u64 range_end)
370 {
371 	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
372 	struct drm_gpuvm_bo *vm_bo;
373 	struct msm_gem_vma *vma;
374 	int ret;
375 
376 	drm_gpuvm_resv_assert_held(&vm->base);
377 
378 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
379 	if (!vma)
380 		return ERR_PTR(-ENOMEM);
381 
382 	if (vm->managed) {
383 		BUG_ON(offset != 0);
384 		BUG_ON(!obj);  /* NULL mappings not valid for kernel managed VM */
385 		ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
386 						obj->size, PAGE_SIZE, 0,
387 						range_start, range_end, 0);
388 
389 		if (ret)
390 			goto err_free_vma;
391 
392 		range_start = vma->node.start;
393 		range_end   = range_start + obj->size;
394 	}
395 
396 	if (obj)
397 		GEM_WARN_ON((range_end - range_start) > obj->size);
398 
399 	struct drm_gpuva_op_map op_map = {
400 		.va.addr = range_start,
401 		.va.range = range_end - range_start,
402 		.gem.obj = obj,
403 		.gem.offset = offset,
404 	};
405 
406 	drm_gpuva_init_from_op(&vma->base, &op_map);
407 	vma->mapped = false;
408 
409 	ret = drm_gpuva_insert(&vm->base, &vma->base);
410 	if (ret)
411 		goto err_free_range;
412 
413 	if (!obj)
414 		return &vma->base;
415 
416 	vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj);
417 	if (IS_ERR(vm_bo)) {
418 		ret = PTR_ERR(vm_bo);
419 		goto err_va_remove;
420 	}
421 
422 	drm_gpuvm_bo_extobj_add(vm_bo);
423 	drm_gpuva_link(&vma->base, vm_bo);
424 	GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
425 
426 	return &vma->base;
427 
428 err_va_remove:
429 	drm_gpuva_remove(&vma->base);
430 err_free_range:
431 	if (vm->managed)
432 		drm_mm_remove_node(&vma->node);
433 err_free_vma:
434 	kfree(vma);
435 	return ERR_PTR(ret);
436 }
437 
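/*
 * drm_gpuvm vm_bo_validate callback: ensure the BO's VMAs are pinned/mapped
 * again after the BO has been evicted.
 */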
438 static int
439 msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
440 {
441 	struct drm_gem_object *obj = vm_bo->obj;
442 	struct drm_gpuva *vma;
443 	int ret;
444 
445 	vm_dbg("validate: %p", obj);
446 
447 	msm_gem_assert_locked(obj);
448 
449 	drm_gpuvm_bo_for_each_va (vma, vm_bo) {
450 		ret = msm_gem_pin_vma_locked(obj, vma);
451 		if (ret)
452 			return ret;
453 	}
454 
455 	return 0;
456 }
457 
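/*
 * Context passed (via the void *arg) to the gpuvm sm_step callbacks while
 * walking the ops generated for a single VM_BIND operation.
 */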
458 struct op_arg {
459 	unsigned flags;
460 	struct msm_vm_bind_job *job;
461 	const struct msm_vm_bind_op *op;
462 	bool kept;
463 };
464 
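/*
 * Queue a low-level MAP/UNMAP op onto the job's vm_ops list, to be applied
 * to the pgtables later in the job_run() path.  Takes a ref on the backing
 * GEM obj (if any) so its pages outlive the mapping.
 */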
465 static int
466 vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op)
467 {
468 	struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);
469 	if (!op)
470 		return -ENOMEM;
471 
472 	*op = _op;
473 	list_add_tail(&op->node, &arg->job->vm_ops);
474 
475 	if (op->obj)
476 		drm_gem_object_get(op->obj);
477 
478 	return 0;
479 }
480 
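/* Allocate a new VMA covering the range described by a gpuvm map op */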
481 static struct drm_gpuva *
482 vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
483 {
484 	return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
485 			       op->va.addr, op->va.addr + op->va.range);
486 }
487 
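/*
 * drm_gpuvm sm_step callback for the MAP step: create the new VMA and queue
 * the pgtable MAP op (a NULL obj becomes a PRR mapping).
 */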
488 static int
489 msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
490 {
491 	struct op_arg *arg = _arg;
492 	struct msm_vm_bind_job *job = arg->job;
493 	struct drm_gem_object *obj = op->map.gem.obj;
494 	struct drm_gpuva *vma;
495 	struct sg_table *sgt;
496 	unsigned prot;
497 	int ret;
498 
499 	if (arg->kept)
500 		return 0;
501 
502 	vma = vma_from_op(arg, &op->map);
503 	if (WARN_ON(IS_ERR(vma)))
504 		return PTR_ERR(vma);
505 
506 	vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
507 	       vma->va.addr, vma->va.range);
508 
509 	if (obj) {
510 		sgt = to_msm_bo(obj)->sgt;
511 		prot = msm_gem_prot(obj);
512 	} else {
513 		sgt = NULL;
514 		prot = IOMMU_READ | IOMMU_WRITE;
515 	}
516 
517 	ret = vm_op_enqueue(arg, (struct msm_vm_op){
518 		.op = MSM_VM_OP_MAP,
519 		.map = {
520 			.sgt = sgt,
521 			.iova = vma->va.addr,
522 			.range = vma->va.range,
523 			.offset = vma->gem.offset,
524 			.prot = prot,
525 			.queue_id = job->queue->id,
526 		},
527 		.obj = vma->gem.obj,
528 	});
529 
530 	if (ret)
531 		return ret;
532 
533 	vma->flags = arg->flags;
534 	to_msm_vma(vma)->mapped = true;
535 
536 	return 0;
537 }
538 
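/*
 * drm_gpuvm sm_step callback for the REMAP step: unmap the affected range
 * and replace the original VMA with new prev/next VMAs as needed.
 */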
539 static int
540 msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
541 {
542 	struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
543 	struct drm_gpuvm *vm = job->vm;
544 	struct drm_gpuva *orig_vma = op->remap.unmap->va;
545 	struct drm_gpuva *prev_vma = NULL, *next_vma = NULL;
546 	struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo;
547 	bool mapped = to_msm_vma(orig_vma)->mapped;
548 	unsigned flags;
549 	int ret;
550 
551 	vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma,
552 	       orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
553 
554 	if (mapped) {
555 		uint64_t unmap_start, unmap_range;
556 
557 		drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
558 
559 		ret = vm_op_enqueue(arg, (struct msm_vm_op){
560 			.op = MSM_VM_OP_UNMAP,
561 			.unmap = {
562 				.iova = unmap_start,
563 				.range = unmap_range,
564 				.queue_id = job->queue->id,
565 			},
566 			.obj = orig_vma->gem.obj,
567 		});
568 
569 		if (ret)
570 			return ret;
571 
572 		/*
573 		 * Part of this GEM obj is still mapped, but we're going to kill the
574 		 * existing VMA and replace it with one or two new ones (ie. two if
575 		 * the unmapped range is in the middle of the existing (unmap) VMA).
576 		 * So just set the state to unmapped:
577 		 */
578 		to_msm_vma(orig_vma)->mapped = false;
579 	}
580 
581 	/*
582 	 * Hold a ref to the vm_bo between the msm_gem_vma_close() and the
583 	 * creation of the new prev/next vma's, in case the vm_bo is tracked
584 	 * in the VM's evict list:
585 	 */
586 	if (vm_bo)
587 		drm_gpuvm_bo_get(vm_bo);
588 
589 	/*
590 	 * The prev_vma and/or next_vma are replacing the unmapped vma, and
591 	 * therefore should preserve it's flags:
592 	 */
593 	flags = orig_vma->flags;
594 
595 	msm_gem_vma_close(orig_vma);
596 
597 	if (op->remap.prev) {
598 		prev_vma = vma_from_op(arg, op->remap.prev);
599 		if (WARN_ON(IS_ERR(prev_vma)))
600 			return PTR_ERR(prev_vma);
601 
602 		vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range);
603 		to_msm_vma(prev_vma)->mapped = mapped;
604 		prev_vma->flags = flags;
605 	}
606 
607 	if (op->remap.next) {
608 		next_vma = vma_from_op(arg, op->remap.next);
609 		if (WARN_ON(IS_ERR(next_vma)))
610 			return PTR_ERR(next_vma);
611 
612 		vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range);
613 		to_msm_vma(next_vma)->mapped = mapped;
614 		next_vma->flags = flags;
615 	}
616 
617 	if (!mapped)
618 		drm_gpuvm_bo_evict(vm_bo, true);
619 
620 	/* Drop the previous ref: */
621 	drm_gpuvm_bo_put(vm_bo);
622 
623 	return 0;
624 }
625 
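/*
 * drm_gpuvm sm_step callback for the UNMAP step: tear down the VMA, unless
 * this is the unmap half of an in-place remap that only changes flags.
 */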
626 static int
627 msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
628 {
629 	struct op_arg *arg = _arg;
630 	struct msm_vm_bind_job *job = arg->job;
631 	struct drm_gpuva *vma = op->unmap.va;
632 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
633 	int ret;
634 
635 	vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
636 	       vma->va.addr, vma->va.range);
637 
638 	/*
639 	 * Detect in-place remap.  Turnip does this to change the vma flags,
640 	 * in particular MSM_VMA_DUMP.  In this case we want to avoid actually
641 	 * touching the page tables, as that would require synchronization
642 	 * against SUBMIT jobs running on the GPU.
643 	 */
644 	if (op->unmap.keep &&
645 	    (arg->op->op == MSM_VM_BIND_OP_MAP) &&
646 	    (vma->gem.obj == arg->op->obj) &&
647 	    (vma->gem.offset == arg->op->obj_offset) &&
648 	    (vma->va.addr == arg->op->iova) &&
649 	    (vma->va.range == arg->op->range)) {
650 		/* We are only expecting a single in-place unmap+map cb pair: */
651 		WARN_ON(arg->kept);
652 
653 		/* Leave the existing VMA in place, but signal that to the map cb: */
654 		arg->kept = true;
655 
656 		/* Only flags are changing, so update that in-place: */
657 		unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
658 		vma->flags = orig_flags | arg->flags;
659 
660 		return 0;
661 	}
662 
663 	if (!msm_vma->mapped)
664 		goto out_close;
665 
666 	ret = vm_op_enqueue(arg, (struct msm_vm_op){
667 		.op = MSM_VM_OP_UNMAP,
668 		.unmap = {
669 			.iova = vma->va.addr,
670 			.range = vma->va.range,
671 			.queue_id = job->queue->id,
672 		},
673 		.obj = vma->gem.obj,
674 	});
675 
676 	if (ret)
677 		return ret;
678 
679 	msm_vma->mapped = false;
680 
681 out_close:
682 	msm_gem_vma_close(vma);
683 
684 	return 0;
685 }
686 
687 static const struct drm_gpuvm_ops msm_gpuvm_ops = {
688 	.vm_free = msm_gem_vm_free,
689 	.vm_bo_validate = msm_gem_vm_bo_validate,
690 	.sm_step_map = msm_gem_vm_sm_step_map,
691 	.sm_step_remap = msm_gem_vm_sm_step_remap,
692 	.sm_step_unmap = msm_gem_vm_sm_step_unmap,
693 };
694 
695 static struct dma_fence *
696 msm_vma_job_run(struct drm_sched_job *_job)
697 {
698 	struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
699 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
700 	struct drm_gem_object *obj;
701 	int ret = vm->unusable ? -EINVAL : 0;
702 
703 	vm_dbg("");
704 
705 	mutex_lock(&vm->mmu_lock);
706 	vm->mmu->prealloc = &job->prealloc;
707 
708 	while (!list_empty(&job->vm_ops)) {
709 		struct msm_vm_op *op =
710 			list_first_entry(&job->vm_ops, struct msm_vm_op, node);
711 
712 		switch (op->op) {
713 		case MSM_VM_OP_MAP:
714 			/*
715 			 * On error, stop trying to map new things.. but we
716 			 * still want to process the unmaps (or in particular,
717 			 * the drm_gem_object_put()s)
718 			 */
719 			if (!ret)
720 				ret = vm_map_op(vm, &op->map);
721 			break;
722 		case MSM_VM_OP_UNMAP:
723 			vm_unmap_op(vm, &op->unmap);
724 			break;
725 		}
726 		drm_gem_object_put(op->obj);
727 		list_del(&op->node);
728 		kfree(op);
729 	}
730 
731 	vm->mmu->prealloc = NULL;
732 	mutex_unlock(&vm->mmu_lock);
733 
734 	/*
735 	 * We failed to perform at least _some_ of the pgtable updates, so
736 	 * now the VM is in an undefined state.  Game over!
737 	 */
738 	if (ret)
739 		msm_gem_vm_unusable(job->vm);
740 
741 	job_foreach_bo (obj, job) {
742 		msm_gem_lock(obj);
743 		msm_gem_unpin_locked(obj);
744 		msm_gem_unlock(obj);
745 	}
746 
747 	/* VM_BIND ops are synchronous, so no fence to wait on: */
748 	return NULL;
749 }
750 
751 static void
752 msm_vma_job_free(struct drm_sched_job *_job)
753 {
754 	struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
755 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
756 	struct drm_gem_object *obj;
757 
758 	vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
759 
760 	atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
761 
762 	drm_sched_job_cleanup(_job);
763 
764 	job_foreach_bo (obj, job)
765 		drm_gem_object_put(obj);
766 
767 	msm_submitqueue_put(job->queue);
768 	dma_fence_put(job->fence);
769 
770 	/* In error paths, we could have unexecuted ops: */
771 	while (!list_empty(&job->vm_ops)) {
772 		struct msm_vm_op *op =
773 			list_first_entry(&job->vm_ops, struct msm_vm_op, node);
774 		list_del(&op->node);
775 		kfree(op);
776 	}
777 
778 	wake_up(&vm->prealloc_throttle.wait);
779 
780 	kfree(job);
781 }
782 
783 static const struct drm_sched_backend_ops msm_vm_bind_ops = {
784 	.run_job = msm_vma_job_run,
785 	.free_job = msm_vma_job_free
786 };
787 
788 /**
789  * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
790  * @drm: the drm device
791  * @mmu: the backing MMU objects handling mapping/unmapping
792  * @name: the name of the VM
793  * @va_start: the start offset of the VA space
794  * @va_size: the size of the VA space
795  * @managed: is it a kernel managed VM?
796  *
797  * In a kernel managed VM, the kernel handles address allocation, and only
798  * synchronous operations are supported.  In a user managed VM, userspace
799  * handles virtual address allocation, and both async and sync operations
800  * are supported.
801  */
802 struct drm_gpuvm *
803 msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
804 		  u64 va_start, u64 va_size, bool managed)
805 {
806 	/*
807 	 * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that
808 	 * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we loose
809 	 * tracking that an extobj is evicted) :facepalm:
810 	 */
811 	enum drm_gpuvm_flags flags = 0;
812 	struct msm_gem_vm *vm;
813 	struct drm_gem_object *dummy_gem;
814 	int ret = 0;
815 
816 	if (IS_ERR(mmu))
817 		return ERR_CAST(mmu);
818 
819 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
820 	if (!vm)
821 		return ERR_PTR(-ENOMEM);
822 
823 	dummy_gem = drm_gpuvm_resv_object_alloc(drm);
824 	if (!dummy_gem) {
825 		ret = -ENOMEM;
826 		goto err_free_vm;
827 	}
828 
829 	if (!managed) {
830 		struct drm_sched_init_args args = {
831 			.ops = &msm_vm_bind_ops,
832 			.num_rqs = 1,
833 			.credit_limit = 1,
834 			.timeout = MAX_SCHEDULE_TIMEOUT,
835 			.name = "msm-vm-bind",
836 			.dev = drm->dev,
837 		};
838 
839 		ret = drm_sched_init(&vm->sched, &args);
840 		if (ret)
841 			goto err_free_dummy;
842 
843 		init_waitqueue_head(&vm->prealloc_throttle.wait);
844 	}
845 
846 	drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
847 		       va_start, va_size, 0, 0, &msm_gpuvm_ops);
848 	drm_gem_object_put(dummy_gem);
849 
850 	vm->mmu = mmu;
851 	mutex_init(&vm->mmu_lock);
852 	vm->managed = managed;
853 
854 	drm_mm_init(&vm->mm, va_start, va_size);
855 
856 	/*
857 	 * We don't really need vm log for kernel managed VMs, as the kernel
858 	 * is responsible for ensuring that GEM objs are mapped if they are
859 	 * used by a submit.  Furthermore we piggyback on mmu_lock to serialize
860 	 * access to the log.
861 	 *
862 	 * Limit the max log_shift to 8 to prevent userspace from asking us
863 	 * for an unreasonable log size.
864 	 */
865 	if (!managed)
866 		vm->log_shift = MIN(vm_log_shift, 8);
867 
868 	if (vm->log_shift) {
869 		vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]),
870 					GFP_KERNEL | __GFP_ZERO);
871 	}
872 
873 	return &vm->base;
874 
875 err_free_dummy:
876 	drm_gem_object_put(dummy_gem);
877 
878 err_free_vm:
879 	kfree(vm);
880 	return ERR_PTR(ret);
881 }
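
/*
 * Illustrative usage sketch (not part of this file): a kernel managed VM,
 * where the kernel allocates iova and only synchronous operations are used,
 * would be created with managed = true, e.g.
 *
 *   vm = msm_gem_vm_create(drm, mmu, "gpu", va_start, va_size, true);
 *
 * whereas a VM_BIND capable context passes managed = false, which also sets
 * up the "msm-vm-bind" scheduler used to apply asynchronous updates.
 */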
882 
883 /**
884  * msm_gem_vm_close() - Close a VM
885  * @gpuvm: The VM to close
886  *
887  * Called when the drm device file is closed, to tear down VM related resources
888  * (which will drop refcounts to GEM objects that were still mapped into the
889  * VM at the time).
890  */
891 void
892 msm_gem_vm_close(struct drm_gpuvm *gpuvm)
893 {
894 	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
895 	struct drm_gpuva *vma, *tmp;
896 	struct drm_exec exec;
897 
898 	/*
899 	 * For kernel managed VMs, the VMAs are torn down when the handle is
900 	 * closed, so nothing more to do.
901 	 */
902 	if (vm->managed)
903 		return;
904 
905 	if (vm->last_fence)
906 		dma_fence_wait(vm->last_fence, false);
907 
908 	/* Kill the scheduler now, so we aren't racing with it for cleanup: */
909 	drm_sched_stop(&vm->sched, NULL);
910 	drm_sched_fini(&vm->sched);
911 
912 	/* Tear down any remaining mappings: */
913 	drm_exec_init(&exec, 0, 2);
914 	drm_exec_until_all_locked (&exec) {
915 		drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm));
916 		drm_exec_retry_on_contention(&exec);
917 
918 		drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) {
919 			struct drm_gem_object *obj = vma->gem.obj;
920 
921 			/*
922 			 * MSM_BO_NO_SHARE objects share the same resv as the
923 			 * VM, in which case the obj is already locked:
924 			 */
925 			if (obj && (obj->resv == drm_gpuvm_resv(gpuvm)))
926 				obj = NULL;
927 
928 			if (obj) {
929 				drm_exec_lock_obj(&exec, obj);
930 				drm_exec_retry_on_contention(&exec);
931 			}
932 
933 			msm_gem_vma_unmap(vma, "close");
934 			msm_gem_vma_close(vma);
935 
936 			if (obj) {
937 				drm_exec_unlock_obj(&exec, obj);
938 			}
939 		}
940 	}
941 	drm_exec_fini(&exec);
942 }
943 
944 
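/*
 * Allocate a VM_BIND job with room for nr_ops ops and initialize its
 * scheduler job; the ops table itself is filled in later by
 * vm_bind_job_lookup_ops().
 */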
945 static struct msm_vm_bind_job *
946 vm_bind_job_create(struct drm_device *dev, struct drm_file *file,
947 		   struct msm_gpu_submitqueue *queue, uint32_t nr_ops)
948 {
949 	struct msm_vm_bind_job *job;
950 	uint64_t sz;
951 	int ret;
952 
953 	sz = struct_size(job, ops, nr_ops);
954 
955 	if (sz > SIZE_MAX)
956 		return ERR_PTR(-ENOMEM);
957 
958 	job = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN);
959 	if (!job)
960 		return ERR_PTR(-ENOMEM);
961 
962 	ret = drm_sched_job_init(&job->base, queue->entity, 1, queue,
963 				 file->client_id);
964 	if (ret) {
965 		kfree(job);
966 		return ERR_PTR(ret);
967 	}
968 
969 	job->vm = msm_context_vm(dev, queue->ctx);
970 	job->queue = queue;
971 	INIT_LIST_HEAD(&job->vm_ops);
972 
973 	return job;
974 }
975 
976 static bool invalid_alignment(uint64_t addr)
977 {
978 	/*
979 	 * Technically this is about GPU alignment, not CPU alignment.  But
980 	 * I've not seen any qcom SoC where the SMMU does not support the
981 	 * CPU's smallest page size.
982 	 */
983 	return !PAGE_ALIGNED(addr);
984 }
985 
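/*
 * Copy a single userspace-supplied bind op into the job's ops table and
 * validate its flags, alignment, range, op type, and handle requirements.
 */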
986 static int
987 lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
988 {
989 	struct drm_device *dev = job->vm->drm;
990 	struct msm_drm_private *priv = dev->dev_private;
991 	int i = job->nr_ops++;
992 	int ret = 0;
993 
994 	job->ops[i].op = op->op;
995 	job->ops[i].handle = op->handle;
996 	job->ops[i].obj_offset = op->obj_offset;
997 	job->ops[i].iova = op->iova;
998 	job->ops[i].range = op->range;
999 	job->ops[i].flags = op->flags;
1000 
1001 	if (op->flags & ~MSM_VM_BIND_OP_FLAGS)
1002 		ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags);
1003 
1004 	if (invalid_alignment(op->iova))
1005 		ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova);
1006 
1007 	if (invalid_alignment(op->obj_offset))
1008 		ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset);
1009 
1010 	if (invalid_alignment(op->range))
1011 		ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range);
1012 
1013 	if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range))
1014 		ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range);
1015 
1016 	/*
1017 	 * MAP must specify a valid handle.  But the handle MBZ for
1018 	 * UNMAP or MAP_NULL.
1019 	 */
1020 	if (op->op == MSM_VM_BIND_OP_MAP) {
1021 		if (!op->handle)
1022 			ret = UERR(EINVAL, dev, "invalid handle\n");
1023 	} else if (op->handle) {
1024 		ret = UERR(EINVAL, dev, "handle must be zero\n");
1025 	}
1026 
1027 	switch (op->op) {
1028 	case MSM_VM_BIND_OP_MAP:
1029 	case MSM_VM_BIND_OP_MAP_NULL:
1030 	case MSM_VM_BIND_OP_UNMAP:
1031 		break;
1032 	default:
1033 		ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op);
1034 		break;
1035 	}
1036 
1037 	if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
1038 	    !adreno_smmu_has_prr(priv->gpu)) {
1039 		ret = UERR(EINVAL, dev, "PRR not supported\n");
1040 	}
1041 
1042 	return ret;
1043 }
1044 
1045 /*
1046  * ioctl parsing, parameter validation, and GEM handle lookup
1047  */
1048 static int
1049 vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args,
1050 		       struct drm_file *file, int *nr_bos)
1051 {
1052 	struct drm_device *dev = job->vm->drm;
1053 	int ret = 0;
1054 	int cnt = 0;
1055 	int i = -1;
1056 
1057 	if (args->nr_ops == 1) {
1058 		/* Single op case, the op is inlined: */
1059 		ret = lookup_op(job, &args->op);
1060 	} else {
1061 		for (unsigned i = 0; i < args->nr_ops; i++) {
1062 			struct drm_msm_vm_bind_op op;
1063 			void __user *userptr =
1064 				u64_to_user_ptr(args->ops + (i * sizeof(op)));
1065 
1066 			/* make sure we don't have garbage flags, in case we hit
1067 			 * error path before flags is initialized:
1068 			 */
1069 			job->ops[i].flags = 0;
1070 
1071 			if (copy_from_user(&op, userptr, sizeof(op))) {
1072 				ret = -EFAULT;
1073 				break;
1074 			}
1075 
1076 			ret = lookup_op(job, &op);
1077 			if (ret)
1078 				break;
1079 		}
1080 	}
1081 
1082 	if (ret) {
1083 		job->nr_ops = 0;
1084 		goto out;
1085 	}
1086 
1087 	spin_lock(&file->table_lock);
1088 
1089 	for (i = 0; i < args->nr_ops; i++) {
1090 		struct msm_vm_bind_op *op = &job->ops[i];
1091 		struct drm_gem_object *obj;
1092 
1093 		if (!op->handle) {
1094 			op->obj = NULL;
1095 			continue;
1096 		}
1097 
1098 		/*
1099 		 * normally use drm_gem_object_lookup(), but for bulk lookup
1100 		 * all under single table_lock just hit object_idr directly:
1101 		 */
1102 		obj = idr_find(&file->object_idr, op->handle);
1103 		if (!obj) {
1104 			ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i);
1105 			goto out_unlock;
1106 		}
1107 
1108 		drm_gem_object_get(obj);
1109 
1110 		op->obj = obj;
1111 		cnt++;
1112 
1113 		if ((op->range + op->obj_offset) > obj->size) {
1114 			ret = UERR(EINVAL, dev, "invalid range: %016llx + %016llx > %016zx\n",
1115 				   op->range, op->obj_offset, obj->size);
1116 			goto out_unlock;
1117 		}
1118 	}
1119 
1120 	*nr_bos = cnt;
1121 
1122 out_unlock:
1123 	spin_unlock(&file->table_lock);
1124 
1125 	if (ret) {
1126 		for (; i >= 0; i--) {
1127 			struct msm_vm_bind_op *op = &job->ops[i];
1128 
1129 			if (!op->obj)
1130 				continue;
1131 
1132 			drm_gem_object_put(op->obj);
1133 			op->obj = NULL;
1134 		}
1135 	}
1136 out:
1137 	return ret;
1138 }
1139 
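/*
 * Account the pgtable pages needed to cover the contiguous range spanned by
 * [first, last], accumulating into the job's prealloc tracking (no-op if
 * first is NULL).
 */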
1140 static void
1141 prealloc_count(struct msm_vm_bind_job *job,
1142 	       struct msm_vm_bind_op *first,
1143 	       struct msm_vm_bind_op *last)
1144 {
1145 	struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu;
1146 
1147 	if (!first)
1148 		return;
1149 
1150 	uint64_t start_iova = first->iova;
1151 	uint64_t end_iova = last->iova + last->range;
1152 
1153 	mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova);
1154 }
1155 
1156 static bool
1157 ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next)
1158 {
1159 	/*
1160 	 * A last-level pagetable covers 2MB, so from the PoV of figuring out
1161 	 * how many pgtable pages to pre-allocate we should merge two ops
1162 	 * if they land in the same 2MB range:
1163 	 */
1164 	uint64_t pte_mask = ~(SZ_2M - 1);
1165 	return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask);
1166 }
1167 
1168 /*
1169  * Determine the amount of memory to prealloc for pgtables.  For sparse images,
1170  * in particular, userspace plays some tricks with the order of page mappings
1171  * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops.
1172  * So detect when multiple MAP operations are physically contiguous, and count
1173  * them as a single mapping.  Otherwise the prealloc_count() will not realize
1174  * they can share pagetable pages and vastly overcount.
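 * For example, 512 contiguous 4K MAP ops that all land in the same 2MB block
 * share a single last-level pagetable page, rather than being counted as 512
 * separate mappings.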
1175  */
1176 static int
1177 vm_bind_prealloc_count(struct msm_vm_bind_job *job)
1178 {
1179 	struct msm_vm_bind_op *first = NULL, *last = NULL;
1180 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
1181 	int ret;
1182 
1183 	for (int i = 0; i < job->nr_ops; i++) {
1184 		struct msm_vm_bind_op *op = &job->ops[i];
1185 
1186 		/* We only care about MAP/MAP_NULL: */
1187 		if (op->op == MSM_VM_BIND_OP_UNMAP)
1188 			continue;
1189 
1190 		/*
1191 		 * If op is contiguous with last in the current range, then
1192 		 * it becomes the new last in the range and we continue
1193 		 * looping:
1194 		 */
1195 		if (last && ops_are_same_pte(last, op)) {
1196 			last = op;
1197 			continue;
1198 		}
1199 
1200 		/*
1201 		 * If op is not contiguous with the current range, flush
1202 		 * the current range and start anew:
1203 		 */
1204 		prealloc_count(job, first, last);
1205 		first = last = op;
1206 	}
1207 
1208 	/* Flush the remaining range: */
1209 	prealloc_count(job, first, last);
1210 
1211 	/*
1212 	 * Now that we know the needed amount to pre-alloc, throttle on pending
1213 	 * VM_BIND jobs if we already have too much pre-alloc memory in flight
1214 	 */
1215 	ret = wait_event_interruptible(
1216 			vm->prealloc_throttle.wait,
1217 			atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
1218 	if (ret)
1219 		return ret;
1220 
1221 	atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);
1222 
1223 	return 0;
1224 }
1225 
1226 /*
1227  * Lock VM and GEM objects
1228  */
1229 static int
1230 vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
1231 {
1232 	int ret;
1233 
1234 	/* Lock VM and objects: */
1235 	drm_exec_until_all_locked (exec) {
1236 		ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm));
1237 		drm_exec_retry_on_contention(exec);
1238 		if (ret)
1239 			return ret;
1240 
1241 		for (unsigned i = 0; i < job->nr_ops; i++) {
1242 			const struct msm_vm_bind_op *op = &job->ops[i];
1243 
1244 			switch (op->op) {
1245 			case MSM_VM_BIND_OP_UNMAP:
1246 				ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec,
1247 							      op->iova,
1248 							      op->obj_offset);
1249 				break;
1250 			case MSM_VM_BIND_OP_MAP:
1251 			case MSM_VM_BIND_OP_MAP_NULL: {
1252 				struct drm_gpuvm_map_req map_req = {
1253 					.map.va.addr = op->iova,
1254 					.map.va.range = op->range,
1255 					.map.gem.obj = op->obj,
1256 					.map.gem.offset = op->obj_offset,
1257 				};
1258 
1259 				ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
1260 				break;
1261 			}
1262 			default:
1263 				/*
1264 				 * lookup_op() should have already thrown an error for
1265 				 * invalid ops
1266 				 */
1267 				WARN_ON("unreachable");
1268 			}
1269 
1270 			drm_exec_retry_on_contention(exec);
1271 			if (ret)
1272 				return ret;
1273 		}
1274 	}
1275 
1276 	return 0;
1277 }
1278 
1279 /*
1280  * Pin GEM objects, ensuring that we have backing pages.  Pinning will move
1281  * the object to the pinned LRU so that the shrinker knows to first consider
1282  * other objects for evicting.
1283  */
1284 static int
1285 vm_bind_job_pin_objects(struct msm_vm_bind_job *job)
1286 {
1287 	struct drm_gem_object *obj;
1288 
1289 	/*
1290 	 * First loop, before holding the LRU lock, avoids holding the
1291 	 * LRU lock while calling msm_gem_pin_vma_locked (which could
1292 	 * trigger get_pages())
1293 	 */
1294 	job_foreach_bo (obj, job) {
1295 		struct page **pages;
1296 
1297 		pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
1298 		if (IS_ERR(pages))
1299 			return PTR_ERR(pages);
1300 	}
1301 
1302 	struct msm_drm_private *priv = job->vm->drm->dev_private;
1303 
1304 	/*
1305 	 * A second loop while holding the LRU lock (a) avoids acquiring/dropping
1306 	 * the LRU lock for each individual bo, while (b) avoiding holding the
1307 	 * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger
1308 	 * get_pages() which could trigger reclaim.. and if we held the LRU lock
1309 	 * could trigger deadlock with the shrinker).
1310 	 */
1311 	mutex_lock(&priv->lru.lock);
1312 	job_foreach_bo (obj, job)
1313 		msm_gem_pin_obj_locked(obj);
1314 	mutex_unlock(&priv->lru.lock);
1315 
1316 	job->bos_pinned = true;
1317 
1318 	return 0;
1319 }
1320 
1321 /*
1322  * Unpin GEM objects.  Normally this is done after the bind job is run.
1323  */
1324 static void
1325 vm_bind_job_unpin_objects(struct msm_vm_bind_job *job)
1326 {
1327 	struct drm_gem_object *obj;
1328 
1329 	if (!job->bos_pinned)
1330 		return;
1331 
1332 	job_foreach_bo (obj, job)
1333 		msm_gem_unpin_locked(obj);
1334 
1335 	job->bos_pinned = false;
1336 }
1337 
1338 /*
1339  * Pre-allocate pgtable memory, and translate the VM bind requests into a
1340  * sequence of pgtable updates to be applied asynchronously.
1341  */
1342 static int
1343 vm_bind_job_prepare(struct msm_vm_bind_job *job)
1344 {
1345 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
1346 	struct msm_mmu *mmu = vm->mmu;
1347 	int ret;
1348 
1349 	ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc);
1350 	if (ret)
1351 		return ret;
1352 
1353 	for (unsigned i = 0; i < job->nr_ops; i++) {
1354 		const struct msm_vm_bind_op *op = &job->ops[i];
1355 		struct op_arg arg = {
1356 			.job = job,
1357 			.op = op,
1358 		};
1359 
1360 		switch (op->op) {
1361 		case MSM_VM_BIND_OP_UNMAP:
1362 			ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
1363 						 op->range);
1364 			break;
1365 		case MSM_VM_BIND_OP_MAP:
1366 			if (op->flags & MSM_VM_BIND_OP_DUMP)
1367 				arg.flags |= MSM_VMA_DUMP;
1368 			fallthrough;
1369 		case MSM_VM_BIND_OP_MAP_NULL: {
1370 			struct drm_gpuvm_map_req map_req = {
1371 				.map.va.addr = op->iova,
1372 				.map.va.range = op->range,
1373 				.map.gem.obj = op->obj,
1374 				.map.gem.offset = op->obj_offset,
1375 			};
1376 
1377 			ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
1378 			break;
1379 		}
1380 		default:
1381 			/*
1382 			 * lookup_op() should have already thrown an error for
1383 			 * invalid ops
1384 			 */
1385 			BUG_ON("unreachable");
1386 		}
1387 
1388 		if (ret) {
1389 			/*
1390 			 * If we've already started modifying the vm, we can't
1391 			 * adequately describe to userspace the intermediate
1392 			 * state the vm is in.  So throw up our hands!
1393 			 */
1394 			if (i > 0)
1395 				msm_gem_vm_unusable(job->vm);
1396 			return ret;
1397 		}
1398 	}
1399 
1400 	return 0;
1401 }
1402 
1403 /*
1404  * Attach fences to the GEM objects being bound.  This will signify to
1405  * the shrinker that they are busy even after dropping the locks (ie.
1406  * drm_exec_fini())
1407  */
1408 static void
1409 vm_bind_job_attach_fences(struct msm_vm_bind_job *job)
1410 {
1411 	for (unsigned i = 0; i < job->nr_ops; i++) {
1412 		struct drm_gem_object *obj = job->ops[i].obj;
1413 
1414 		if (!obj)
1415 			continue;
1416 
1417 		dma_resv_add_fence(obj->resv, job->fence,
1418 				   DMA_RESV_USAGE_KERNEL);
1419 	}
1420 }
1421 
1422 int
1423 msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
1424 {
1425 	struct msm_drm_private *priv = dev->dev_private;
1426 	struct drm_msm_vm_bind *args = data;
1427 	struct msm_context *ctx = file->driver_priv;
1428 	struct msm_vm_bind_job *job = NULL;
1429 	struct msm_gpu *gpu = priv->gpu;
1430 	struct msm_gpu_submitqueue *queue;
1431 	struct msm_syncobj_post_dep *post_deps = NULL;
1432 	struct drm_syncobj **syncobjs_to_reset = NULL;
1433 	struct sync_file *sync_file = NULL;
1434 	struct dma_fence *fence;
1435 	int out_fence_fd = -1;
1436 	int ret, nr_bos = 0;
1437 	unsigned i;
1438 
1439 	if (!gpu)
1440 		return -ENXIO;
1441 
1442 	/*
1443 	 * Maybe we could allow just UNMAP ops?  OTOH userspace should just
1444 	 * immediately close the device file and all will be torn down.
1445 	 */
1446 	if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
1447 		return UERR(EPIPE, dev, "context is unusable");
1448 
1449 	/*
1450 	 * Technically, you cannot create a VM_BIND submitqueue in the first
1451 	 * place, if you haven't opted in to VM_BIND context.  But it is
1452 	 * cleaner / less confusing, to check this case directly.
1453 	 */
1454 	if (!msm_context_is_vmbind(ctx))
1455 		return UERR(EINVAL, dev, "context does not support vmbind");
1456 
1457 	if (args->flags & ~MSM_VM_BIND_FLAGS)
1458 		return UERR(EINVAL, dev, "invalid flags");
1459 
1460 	queue = msm_submitqueue_get(ctx, args->queue_id);
1461 	if (!queue)
1462 		return -ENOENT;
1463 
1464 	if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) {
1465 		ret = UERR(EINVAL, dev, "Invalid queue type");
1466 		goto out_post_unlock;
1467 	}
1468 
1469 	if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
1470 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
1471 		if (out_fence_fd < 0) {
1472 			ret = out_fence_fd;
1473 			goto out_post_unlock;
1474 		}
1475 	}
1476 
1477 	job = vm_bind_job_create(dev, file, queue, args->nr_ops);
1478 	if (IS_ERR(job)) {
1479 		ret = PTR_ERR(job);
1480 		goto out_post_unlock;
1481 	}
1482 
1483 	ret = mutex_lock_interruptible(&queue->lock);
1484 	if (ret)
1485 		goto out_post_unlock;
1486 
1487 	if (args->flags & MSM_VM_BIND_FENCE_FD_IN) {
1488 		struct dma_fence *in_fence;
1489 
1490 		in_fence = sync_file_get_fence(args->fence_fd);
1491 
1492 		if (!in_fence) {
1493 			ret = UERR(EINVAL, dev, "invalid in-fence");
1494 			goto out_unlock;
1495 		}
1496 
1497 		ret = drm_sched_job_add_dependency(&job->base, in_fence);
1498 		if (ret)
1499 			goto out_unlock;
1500 	}
1501 
1502 	if (args->in_syncobjs > 0) {
1503 		syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base,
1504 							   file, args->in_syncobjs,
1505 							   args->nr_in_syncobjs,
1506 							   args->syncobj_stride);
1507 		if (IS_ERR(syncobjs_to_reset)) {
1508 			ret = PTR_ERR(syncobjs_to_reset);
1509 			goto out_unlock;
1510 		}
1511 	}
1512 
1513 	if (args->out_syncobjs > 0) {
1514 		post_deps = msm_syncobj_parse_post_deps(dev, file,
1515 							args->out_syncobjs,
1516 							args->nr_out_syncobjs,
1517 							args->syncobj_stride);
1518 		if (IS_ERR(post_deps)) {
1519 			ret = PTR_ERR(post_deps);
1520 			goto out_unlock;
1521 		}
1522 	}
1523 
1524 	ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos);
1525 	if (ret)
1526 		goto out_unlock;
1527 
1528 	ret = vm_bind_prealloc_count(job);
1529 	if (ret)
1530 		goto out_unlock;
1531 
1532 	struct drm_exec exec;
1533 	unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT;
1534 	drm_exec_init(&exec, flags, nr_bos + 1);
1535 
1536 	ret = vm_bind_job_lock_objects(job, &exec);
1537 	if (ret)
1538 		goto out;
1539 
1540 	ret = vm_bind_job_pin_objects(job);
1541 	if (ret)
1542 		goto out;
1543 
1544 	ret = vm_bind_job_prepare(job);
1545 	if (ret)
1546 		goto out;
1547 
1548 	drm_sched_job_arm(&job->base);
1549 
1550 	job->fence = dma_fence_get(&job->base.s_fence->finished);
1551 
1552 	if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
1553 		sync_file = sync_file_create(job->fence);
1554 		if (!sync_file)
1555 			ret = -ENOMEM;
1556 	}
1557 
1558 	if (ret)
1559 		goto out;
1560 
1561 	vm_bind_job_attach_fences(job);
1562 
1563 	/*
1564 	 * The job can be free'd (and fence unref'd) at any point after
1565 	 * drm_sched_entity_push_job(), so we need to hold our own ref
1566 	 */
1567 	fence = dma_fence_get(job->fence);
1568 
1569 	drm_sched_entity_push_job(&job->base);
1570 
1571 	msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
1572 	msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence);
1573 
1574 	dma_fence_put(fence);
1575 
1576 out:
1577 	if (ret)
1578 		vm_bind_job_unpin_objects(job);
1579 
1580 	drm_exec_fini(&exec);
1581 out_unlock:
1582 	mutex_unlock(&queue->lock);
1583 out_post_unlock:
1584 	if (ret) {
1585 		if (out_fence_fd >= 0)
1586 			put_unused_fd(out_fence_fd);
1587 		if (sync_file)
1588 			fput(sync_file->file);
1589 	} else if (sync_file) {
1590 		fd_install(out_fence_fd, sync_file->file);
1591 		args->fence_fd = out_fence_fd;
1592 	}
1593 
1594 	if (!IS_ERR_OR_NULL(job)) {
1595 		if (ret)
1596 			msm_vma_job_free(&job->base);
1597 	} else {
1598 		/*
1599 		 * If the submit hasn't yet taken ownership of the queue
1600 		 * then we need to drop the reference ourself:
1601 		 */
1602 		msm_submitqueue_put(queue);
1603 	}
1604 
1605 	if (!IS_ERR_OR_NULL(post_deps)) {
1606 		for (i = 0; i < args->nr_out_syncobjs; ++i) {
1607 			kfree(post_deps[i].chain);
1608 			drm_syncobj_put(post_deps[i].syncobj);
1609 		}
1610 		kfree(post_deps);
1611 	}
1612 
1613 	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
1614 		for (i = 0; i < args->nr_in_syncobjs; ++i) {
1615 			if (syncobjs_to_reset[i])
1616 				drm_syncobj_put(syncobjs_to_reset[i]);
1617 		}
1618 		kfree(syncobjs_to_reset);
1619 	}
1620 
1621 	return ret;
1622 }
1623