xref: /linux/drivers/gpu/drm/msm/msm_gem_vma.c (revision 205bd15619322a1429c1bf53831a284a12b25e2a)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2016 Red Hat
4  * Author: Rob Clark <robdclark@gmail.com>
5  */
6 
7 #include "drm/drm_file.h"
8 #include "drm/msm_drm.h"
9 #include "linux/file.h"
10 #include "linux/sync_file.h"
11 
12 #include "msm_drv.h"
13 #include "msm_gem.h"
14 #include "msm_gpu.h"
15 #include "msm_mmu.h"
16 #include "msm_syncobj.h"
17 
18 #define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__)
19 
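/*
 * A descriptive note on the knob below (derived from how it is used later in
 * this file): when enabled, the VM op log is a per-VM ring buffer of
 * (1 << vm_log_shift) entries (capped at 256), recording MAP/UNMAP operations
 * on user managed VMs.  The default of zero disables the log.  It is dumped
 * by msm_gem_vm_unusable() when a VM is marked unusable.
 */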
20 static uint vm_log_shift = 0;
21 MODULE_PARM_DESC(vm_log_shift, "Length of VM op log");
22 module_param_named(vm_log_shift, vm_log_shift, uint, 0600);
23 
24 /**
25  * struct msm_vm_map_op - create new pgtable mapping
26  */
27 struct msm_vm_map_op {
28 	/** @iova: start address for mapping */
29 	uint64_t iova;
30 	/** @range: size of the region to map */
31 	uint64_t range;
32 	/** @offset: offset into @sgt to map */
33 	uint64_t offset;
34 	/** @sgt: pages to map, or NULL for a PRR mapping */
35 	struct sg_table *sgt;
36 	/** @prot: the mapping protection flags */
37 	int prot;
38 
39 	/**
40 	 * @queue_id: The id of the submitqueue the operation is performed
41 	 * on, or zero for ops triggered outside of a submitqueue (ie.
42 	 * process cleanup)
43 	 */
44 	int queue_id;
45 };
46 
47 /**
48  * struct msm_vm_unmap_op - unmap a range of pages from pgtable
49  */
50 struct msm_vm_unmap_op {
51 	/** @iova: start address for unmap */
52 	uint64_t iova;
53 	/** @range: size of region to unmap */
54 	uint64_t range;
55 
56 	/** @reason: The reason for the unmap */
57 	const char *reason;
58 
59 	/**
60 	 * @queue_id: The id of the submitqueue the operation is performed
61 	 * on, or zero for (in particular) UNMAP ops triggered outside of
62 	 * a submitqueue (ie. process cleanup)
63 	 */
64 	int queue_id;
65 };
66 
67 /**
68  * struct msm_vm_op - A MAP or UNMAP operation
69  */
70 struct msm_vm_op {
71 	/** @op: The operation type */
72 	enum {
73 		MSM_VM_OP_MAP = 1,
74 		MSM_VM_OP_UNMAP,
75 	} op;
76 	union {
77 		/** @map: Parameters used if op == MSM_VM_OP_MAP */
78 		struct msm_vm_map_op map;
79 		/** @unmap: Parameters used if op == MSM_VM_OP_UNMAP */
80 		struct msm_vm_unmap_op unmap;
81 	};
82 	/** @node: list head in msm_vm_bind_job::vm_ops */
83 	struct list_head node;
84 
85 	/**
86 	 * @obj: backing object for pages to be mapped/unmapped
87 	 *
88 	 * Async unmap ops, in particular, must hold a reference to the
89 	 * original GEM object backing the mapping that will be unmapped.
90 	 * But the same can be required in the map path, for example if
91 	 * there is not a corresponding unmap op, such as process exit.
92 	 *
93 	 * This ensures that the pages backing the mapping are not freed
94 	 * before the mapping is torn down.
95 	 */
96 	struct drm_gem_object *obj;
97 };
98 
99 /**
100  * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl
101  *
102  * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL)
103  * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP)
104  * which are applied to the pgtables asynchronously.  For example a userspace
105  * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP
106  * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping.
107  */
108 struct msm_vm_bind_job {
109 	/** @base: base class for drm_sched jobs */
110 	struct drm_sched_job base;
111 	/** @vm: The VM being operated on */
112 	struct drm_gpuvm *vm;
113 	/** @fence: The fence that is signaled when job completes */
114 	struct dma_fence *fence;
115 	/** @queue: The queue that the job runs on */
116 	struct msm_gpu_submitqueue *queue;
117 	/** @prealloc: Tracking for pre-allocated MMU pgtable pages */
118 	struct msm_mmu_prealloc prealloc;
119 	/** @vm_ops: a list of struct msm_vm_op */
120 	struct list_head vm_ops;
121 	/** @bos_pinned: are the GEM objects being bound pinned? */
122 	bool bos_pinned;
123 	/** @nr_ops: the number of userspace requested ops */
124 	unsigned int nr_ops;
125 	/**
126 	 * @ops: the userspace requested ops
127 	 *
128 	 * The userspace requested ops are copied/parsed and validated
129 	 * before we start applying the updates to try to do as much up-
130 	 * front error checking as possible, to avoid the VM being in an
131 	 * undefined state due to partially executed VM_BIND.
132 	 *
133 	 * This table also serves to hold a reference to the backing GEM
134 	 * objects.
135 	 */
136 	struct msm_vm_bind_op {
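		/*
		 * These fields mirror struct drm_msm_vm_bind_op, except that
		 * the userspace GEM @handle is replaced by the looked-up @obj
		 * (with a reference held) in vm_bind_job_lookup_ops().
		 */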
137 		uint32_t op;
138 		uint32_t flags;
139 		union {
140 			struct drm_gem_object *obj;
141 			uint32_t handle;
142 		};
143 		uint64_t obj_offset;
144 		uint64_t iova;
145 		uint64_t range;
146 	} ops[];
147 };
148 
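/*
 * Iterate over the GEM objects referenced by a VM_BIND job's ops table,
 * skipping ops with no backing object (e.g. UNMAP and MAP_NULL ops):
 */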
149 #define job_foreach_bo(obj, _job) \
150 	for (unsigned i = 0; i < (_job)->nr_ops; i++) \
151 		if ((obj = (_job)->ops[i].obj))
152 
153 static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job)
154 {
155 	return container_of(job, struct msm_vm_bind_job, base);
156 }
157 
158 static void
159 msm_gem_vm_free(struct drm_gpuvm *gpuvm)
160 {
161 	struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base);
162 
163 	drm_mm_takedown(&vm->mm);
164 	if (vm->mmu)
165 		vm->mmu->funcs->destroy(vm->mmu);
166 	dma_fence_put(vm->last_fence);
167 	put_pid(vm->pid);
168 	kfree(vm->log);
169 	kfree(vm);
170 }
171 
172 /**
173  * msm_gem_vm_unusable() - Mark a VM as unusable
174  * @gpuvm: the VM to mark unusable
175  */
176 void
177 msm_gem_vm_unusable(struct drm_gpuvm *gpuvm)
178 {
179 	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
180 	uint32_t vm_log_len = (1 << vm->log_shift);
181 	uint32_t vm_log_mask = vm_log_len - 1;
182 	uint32_t nr_vm_logs;
183 	int first;
184 
185 	vm->unusable = true;
186 
187 	/* Bail if no log, or empty log: */
188 	if (!vm->log || !vm->log[0].op)
189 		return;
190 
191 	mutex_lock(&vm->mmu_lock);
192 
193 	/*
194 	 * log_idx is the next entry to overwrite, meaning it is the oldest, or
195 	 * first, entry (other than the special case handled below where the
196 	 * log hasn't wrapped around yet)
197 	 */
198 	first = vm->log_idx;
199 
200 	if (!vm->log[first].op) {
201 		/*
202 		 * If the next log entry has not been written yet, then only
203 		 * entries 0 to idx-1 are valid (ie. we haven't wrapped around
204 		 * yet)
205 		 */
206 		nr_vm_logs = first;
207 		first = 0;
208 	} else {
209 		nr_vm_logs = vm_log_len;
210 	}
211 
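	/*
	 * Dump entries oldest first.  For example, with a wrapped 4-entry
	 * log and log_idx == 1, the entries are printed in the order
	 * 1, 2, 3, 0:
	 */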
212 	pr_err("vm-log:\n");
213 	for (int i = 0; i < nr_vm_logs; i++) {
214 		int idx = (i + first) & vm_log_mask;
215 		struct msm_gem_vm_log_entry *e = &vm->log[idx];
216 		pr_err("  - %s:%d: 0x%016llx-0x%016llx\n",
217 		       e->op, e->queue_id, e->iova,
218 		       e->iova + e->range);
219 	}
220 
221 	mutex_unlock(&vm->mmu_lock);
222 }
223 
224 static void
225 vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id)
226 {
227 	int idx;
228 
229 	if (!vm->managed)
230 		lockdep_assert_held(&vm->mmu_lock);
231 
232 	vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range);
233 
234 	if (!vm->log)
235 		return;
236 
237 	idx = vm->log_idx;
238 	vm->log[idx].op = op;
239 	vm->log[idx].iova = iova;
240 	vm->log[idx].range = range;
241 	vm->log[idx].queue_id = queue_id;
242 	vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1);
243 }
244 
245 static void
246 vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op)
247 {
248 	const char *reason = op->reason;
249 
250 	if (!reason)
251 		reason = "unmap";
252 
253 	vm_log(vm, reason, op->iova, op->range, op->queue_id);
254 
255 	vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range);
256 }
257 
258 static int
259 vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op)
260 {
261 	vm_log(vm, "map", op->iova, op->range, op->queue_id);
262 
263 	return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset,
264 				   op->range, op->prot);
265 }
266 
267 /* Actually unmap memory for the vma */
268 void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason)
269 {
270 	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
271 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
272 
273 	/* Don't do anything if the memory isn't mapped */
274 	if (!msm_vma->mapped)
275 		return;
276 
277 	/*
278 	 * The mmu_lock is only needed when pgtable preallocation is used,
279 	 * in which case we don't need to worry about recursion into the
280 	 * shrinker.
281 	 */
282 	if (!vm->managed)
283 		mutex_lock(&vm->mmu_lock);
284 
285 	vm_unmap_op(vm, &(struct msm_vm_unmap_op){
286 		.iova = vma->va.addr,
287 		.range = vma->va.range,
288 		.reason = reason,
289 	});
290 
291 	if (!vm->managed)
292 		mutex_unlock(&vm->mmu_lock);
293 
294 	msm_vma->mapped = false;
295 }
296 
297 /* Map and pin vma: */
298 int
299 msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt)
300 {
301 	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
302 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
303 	int ret;
304 
305 	if (GEM_WARN_ON(!vma->va.addr))
306 		return -EINVAL;
307 
308 	if (msm_vma->mapped)
309 		return 0;
310 
311 	msm_vma->mapped = true;
312 
313 	/*
314 	 * The mmu_lock is only needed when pgtable preallocation is used,
315 	 * in which case we don't need to worry about recursion into the
316 	 * shrinker.
317 	 */
318 	if (!vm->managed)
319 		mutex_lock(&vm->mmu_lock);
320 
321 	/*
322 	 * NOTE: if not using pgtable preallocation, we cannot hold
323 	 * a lock across map/unmap which is also used in the job_run()
324 	 * path, as this can cause deadlock in job_run() vs shrinker/
325 	 * reclaim.
326 	 */
327 	ret = vm_map_op(vm, &(struct msm_vm_map_op){
328 		.iova = vma->va.addr,
329 		.range = vma->va.range,
330 		.offset = vma->gem.offset,
331 		.sgt = sgt,
332 		.prot = prot,
333 	});
334 
335 	if (!vm->managed)
336 		mutex_unlock(&vm->mmu_lock);
337 
338 	if (ret)
339 		msm_vma->mapped = false;
340 
341 	return ret;
342 }
343 
344 /* Close an iova.  Warn if it is still in use */
345 void msm_gem_vma_close(struct drm_gpuva *vma)
346 {
347 	struct msm_gem_vm *vm = to_msm_vm(vma->vm);
348 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
349 
350 	GEM_WARN_ON(msm_vma->mapped);
351 
352 	drm_gpuvm_resv_assert_held(&vm->base);
353 
354 	if (vma->gem.obj)
355 		msm_gem_assert_locked(vma->gem.obj);
356 
357 	if (vma->va.addr && vm->managed)
358 		drm_mm_remove_node(&msm_vma->node);
359 
360 	drm_gpuva_remove(vma);
361 	drm_gpuva_unlink(vma);
362 
363 	kfree(vma);
364 }
365 
366 /* Create a new vma and allocate an iova for it */
367 struct drm_gpuva *
368 msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj,
369 		u64 offset, u64 range_start, u64 range_end)
370 {
371 	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
372 	struct drm_gpuvm_bo *vm_bo;
373 	struct msm_gem_vma *vma;
374 	int ret;
375 
376 	drm_gpuvm_resv_assert_held(&vm->base);
377 
378 	vma = kzalloc(sizeof(*vma), GFP_KERNEL);
379 	if (!vma)
380 		return ERR_PTR(-ENOMEM);
381 
382 	if (vm->managed) {
383 		BUG_ON(offset != 0);
384 		BUG_ON(!obj);  /* NULL mappings not valid for kernel managed VM */
385 		ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node,
386 						obj->size, PAGE_SIZE, 0,
387 						range_start, range_end, 0);
388 
389 		if (ret)
390 			goto err_free_vma;
391 
392 		range_start = vma->node.start;
393 		range_end   = range_start + obj->size;
394 	}
395 
396 	if (obj)
397 		GEM_WARN_ON((range_end - range_start) > obj->size);
398 
399 	struct drm_gpuva_op_map op_map = {
400 		.va.addr = range_start,
401 		.va.range = range_end - range_start,
402 		.gem.obj = obj,
403 		.gem.offset = offset,
404 	};
405 
406 	drm_gpuva_init_from_op(&vma->base, &op_map);
407 	vma->mapped = false;
408 
409 	ret = drm_gpuva_insert(&vm->base, &vma->base);
410 	if (ret)
411 		goto err_free_range;
412 
413 	if (!obj)
414 		return &vma->base;
415 
416 	vm_bo = drm_gpuvm_bo_obtain_locked(&vm->base, obj);
417 	if (IS_ERR(vm_bo)) {
418 		ret = PTR_ERR(vm_bo);
419 		goto err_va_remove;
420 	}
421 
422 	drm_gpuvm_bo_extobj_add(vm_bo);
423 	drm_gpuva_link(&vma->base, vm_bo);
424 	GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo));
425 
426 	return &vma->base;
427 
428 err_va_remove:
429 	drm_gpuva_remove(&vma->base);
430 err_free_range:
431 	if (vm->managed)
432 		drm_mm_remove_node(&vma->node);
433 err_free_vma:
434 	kfree(vma);
435 	return ERR_PTR(ret);
436 }
437 
438 static int
439 msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
440 {
441 	struct drm_gem_object *obj = vm_bo->obj;
442 	struct drm_gpuva *vma;
443 	int ret;
444 
445 	vm_dbg("validate: %p", obj);
446 
447 	msm_gem_assert_locked(obj);
448 
449 	drm_gpuvm_bo_for_each_va (vma, vm_bo) {
450 		ret = msm_gem_pin_vma_locked(obj, vma);
451 		if (ret)
452 			return ret;
453 	}
454 
455 	return 0;
456 }
457 
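/*
 * Argument threaded through the drm_gpuvm sm_step callbacks while expanding a
 * single userspace-requested op: @op points at the originating msm_vm_bind_op,
 * @flags holds the MSM_VMA_* flags to apply to newly created vmas, and @kept
 * is set by the unmap step when an in-place remap keeps the existing vma
 * (making the following map step a no-op).
 */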
458 struct op_arg {
459 	unsigned flags;
460 	struct msm_vm_bind_job *job;
461 	const struct msm_vm_bind_op *op;
462 	bool kept;
463 };
464 
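/*
 * Allocate a deferred pgtable update (MSM_VM_OP_MAP/UNMAP) and append it to
 * the job's vm_ops list, taking a reference on the backing GEM object (if
 * any) so that its pages are not freed before the op is applied in the
 * job_run() path.
 */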
465 static int
466 vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op)
467 {
468 	struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL);
469 	if (!op)
470 		return -ENOMEM;
471 
472 	*op = _op;
473 	list_add_tail(&op->node, &arg->job->vm_ops);
474 
475 	if (op->obj)
476 		drm_gem_object_get(op->obj);
477 
478 	return 0;
479 }
480 
481 static struct drm_gpuva *
482 vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op)
483 {
484 	return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset,
485 			       op->va.addr, op->va.addr + op->va.range);
486 }
487 
488 static int
489 msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *_arg)
490 {
491 	struct op_arg *arg = _arg;
492 	struct msm_vm_bind_job *job = arg->job;
493 	struct drm_gem_object *obj = op->map.gem.obj;
494 	struct drm_gpuva *vma;
495 	struct sg_table *sgt;
496 	unsigned prot;
497 	int ret;
498 
499 	if (arg->kept)
500 		return 0;
501 
502 	vma = vma_from_op(arg, &op->map);
503 	if (WARN_ON(IS_ERR(vma)))
504 		return PTR_ERR(vma);
505 
506 	vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
507 	       vma->va.addr, vma->va.range);
508 
509 	if (obj) {
510 		sgt = to_msm_bo(obj)->sgt;
511 		prot = msm_gem_prot(obj);
512 	} else {
513 		sgt = NULL;
514 		prot = IOMMU_READ | IOMMU_WRITE;
515 	}
516 
517 	ret = vm_op_enqueue(arg, (struct msm_vm_op){
518 		.op = MSM_VM_OP_MAP,
519 		.map = {
520 			.sgt = sgt,
521 			.iova = vma->va.addr,
522 			.range = vma->va.range,
523 			.offset = vma->gem.offset,
524 			.prot = prot,
525 			.queue_id = job->queue->id,
526 		},
527 		.obj = vma->gem.obj,
528 	});
529 
530 	if (ret)
531 		return ret;
532 
533 	vma->flags = ((struct op_arg *)arg)->flags;
534 	to_msm_vma(vma)->mapped = true;
535 
536 	return 0;
537 }
538 
539 static int
540 msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg)
541 {
542 	struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job;
543 	struct drm_gpuvm *vm = job->vm;
544 	struct drm_gpuva *orig_vma = op->remap.unmap->va;
545 	struct drm_gpuva *prev_vma = NULL, *next_vma = NULL;
546 	struct drm_gpuvm_bo *vm_bo = orig_vma->vm_bo;
547 	bool mapped = to_msm_vma(orig_vma)->mapped;
548 	unsigned flags;
549 	int ret;
550 
551 	vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma,
552 	       orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range);
553 
554 	if (mapped) {
555 		uint64_t unmap_start, unmap_range;
556 
557 		drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
558 
559 		ret = vm_op_enqueue(arg, (struct msm_vm_op){
560 			.op = MSM_VM_OP_UNMAP,
561 			.unmap = {
562 				.iova = unmap_start,
563 				.range = unmap_range,
564 				.queue_id = job->queue->id,
565 			},
566 			.obj = orig_vma->gem.obj,
567 		});
568 
569 		if (ret)
570 			return ret;
571 
572 		/*
573 		 * Part of this GEM obj is still mapped, but we're going to kill the
574 		 * existing VMA and replace it with one or two new ones (ie. two if
575 		 * the unmapped range is in the middle of the existing (unmap) VMA).
576 		 * So just set the state to unmapped:
577 		 */
578 		to_msm_vma(orig_vma)->mapped = false;
579 	}
580 
581 	/*
582 	 * Hold a ref to the vm_bo between the msm_gem_vma_close() and the
583 	 * creation of the new prev/next vmas, in case the vm_bo is tracked
584 	 * in the VM's evict list:
585 	 */
586 	if (vm_bo)
587 		drm_gpuvm_bo_get(vm_bo);
588 
589 	/*
590 	 * The prev_vma and/or next_vma are replacing the unmapped vma, and
591 	 * therefore should preserve its flags:
592 	 */
593 	flags = orig_vma->flags;
594 
595 	msm_gem_vma_close(orig_vma);
596 
597 	if (op->remap.prev) {
598 		prev_vma = vma_from_op(arg, op->remap.prev);
599 		if (WARN_ON(IS_ERR(prev_vma)))
600 			return PTR_ERR(prev_vma);
601 
602 		vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range);
603 		to_msm_vma(prev_vma)->mapped = mapped;
604 		prev_vma->flags = flags;
605 	}
606 
607 	if (op->remap.next) {
608 		next_vma = vma_from_op(arg, op->remap.next);
609 		if (WARN_ON(IS_ERR(next_vma)))
610 			return PTR_ERR(next_vma);
611 
612 		vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range);
613 		to_msm_vma(next_vma)->mapped = mapped;
614 		next_vma->flags = flags;
615 	}
616 
617 	if (!mapped)
618 		drm_gpuvm_bo_evict(vm_bo, true);
619 
620 	/* Drop the previous ref: */
621 	drm_gpuvm_bo_put(vm_bo);
622 
623 	return 0;
624 }
625 
626 static int
627 msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *_arg)
628 {
629 	struct op_arg *arg = _arg;
630 	struct msm_vm_bind_job *job = arg->job;
631 	struct drm_gpuva *vma = op->unmap.va;
632 	struct msm_gem_vma *msm_vma = to_msm_vma(vma);
633 	int ret;
634 
635 	vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj,
636 	       vma->va.addr, vma->va.range);
637 
638 	/*
639 	 * Detect in-place remap.  Turnip does this to change the vma flags,
640 	 * in particular MSM_VMA_DUMP.  In this case we want to avoid actually
641 	 * touching the page tables, as that would require synchronization
642 	 * against SUBMIT jobs running on the GPU.
643 	 */
644 	if (op->unmap.keep &&
645 	    (arg->op->op == MSM_VM_BIND_OP_MAP) &&
646 	    (vma->gem.obj == arg->op->obj) &&
647 	    (vma->gem.offset == arg->op->obj_offset) &&
648 	    (vma->va.addr == arg->op->iova) &&
649 	    (vma->va.range == arg->op->range)) {
650 		/* We are only expecting a single in-place unmap+map cb pair: */
651 		WARN_ON(arg->kept);
652 
653 		/* Leave the existing VMA in place, but signal that to the map cb: */
654 		arg->kept = true;
655 
656 		/* Only flags are changing, so update that in-place: */
657 		unsigned orig_flags = vma->flags & (DRM_GPUVA_USERBITS - 1);
658 		vma->flags = orig_flags | arg->flags;
659 
660 		return 0;
661 	}
662 
663 	if (!msm_vma->mapped)
664 		goto out_close;
665 
666 	ret = vm_op_enqueue(arg, (struct msm_vm_op){
667 		.op = MSM_VM_OP_UNMAP,
668 		.unmap = {
669 			.iova = vma->va.addr,
670 			.range = vma->va.range,
671 			.queue_id = job->queue->id,
672 		},
673 		.obj = vma->gem.obj,
674 	});
675 
676 	if (ret)
677 		return ret;
678 
679 	msm_vma->mapped = false;
680 
681 out_close:
682 	msm_gem_vma_close(vma);
683 
684 	return 0;
685 }
686 
687 static const struct drm_gpuvm_ops msm_gpuvm_ops = {
688 	.vm_free = msm_gem_vm_free,
689 	.vm_bo_validate = msm_gem_vm_bo_validate,
690 	.sm_step_map = msm_gem_vm_sm_step_map,
691 	.sm_step_remap = msm_gem_vm_sm_step_remap,
692 	.sm_step_unmap = msm_gem_vm_sm_step_unmap,
693 };
694 
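/*
 * drm_sched run_job callback for VM_BIND jobs: applies the queued MAP/UNMAP
 * ops to the pgtables (using the job's pre-allocated pgtable pages) and then
 * unpins the backing GEM objects.  If any pgtable update fails, the VM is
 * marked unusable.
 */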
695 static struct dma_fence *
696 msm_vma_job_run(struct drm_sched_job *_job)
697 {
698 	struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
699 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
700 	struct drm_gem_object *obj;
701 	int ret = vm->unusable ? -EINVAL : 0;
702 
703 	vm_dbg("");
704 
705 	mutex_lock(&vm->mmu_lock);
706 	vm->mmu->prealloc = &job->prealloc;
707 
708 	while (!list_empty(&job->vm_ops)) {
709 		struct msm_vm_op *op =
710 			list_first_entry(&job->vm_ops, struct msm_vm_op, node);
711 
712 		switch (op->op) {
713 		case MSM_VM_OP_MAP:
714 			/*
715 			 * On error, stop trying to map new things, but we
716 			 * still want to process the unmaps (or in particular,
717 			 * the drm_gem_object_put()s).
718 			 */
719 			if (!ret)
720 				ret = vm_map_op(vm, &op->map);
721 			break;
722 		case MSM_VM_OP_UNMAP:
723 			vm_unmap_op(vm, &op->unmap);
724 			break;
725 		}
726 		drm_gem_object_put(op->obj);
727 		list_del(&op->node);
728 		kfree(op);
729 	}
730 
731 	vm->mmu->prealloc = NULL;
732 	mutex_unlock(&vm->mmu_lock);
733 
734 	/*
735 	 * We failed to perform at least _some_ of the pgtable updates, so
736 	 * now the VM is in an undefined state.  Game over!
737 	 */
738 	if (ret)
739 		msm_gem_vm_unusable(job->vm);
740 
741 	job_foreach_bo (obj, job) {
742 		msm_gem_lock(obj);
743 		msm_gem_unpin_locked(obj);
744 		msm_gem_unlock(obj);
745 	}
746 
747 	/* VM_BIND ops are synchronous, so no fence to wait on: */
748 	return NULL;
749 }
750 
751 static void
752 msm_vma_job_free(struct drm_sched_job *_job)
753 {
754 	struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job);
755 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
756 	struct drm_gem_object *obj;
757 
758 	vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc);
759 
760 	atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
761 
762 	drm_sched_job_cleanup(_job);
763 
764 	job_foreach_bo (obj, job)
765 		drm_gem_object_put(obj);
766 
767 	msm_submitqueue_put(job->queue);
768 	dma_fence_put(job->fence);
769 
770 	/* In error paths, we could have unexecuted ops: */
771 	while (!list_empty(&job->vm_ops)) {
772 		struct msm_vm_op *op =
773 			list_first_entry(&job->vm_ops, struct msm_vm_op, node);
774 		list_del(&op->node);
775 		kfree(op);
776 	}
777 
778 	wake_up(&vm->prealloc_throttle.wait);
779 
780 	kfree(job);
781 }
782 
783 static const struct drm_sched_backend_ops msm_vm_bind_ops = {
784 	.run_job = msm_vma_job_run,
785 	.free_job = msm_vma_job_free
786 };
787 
788 /**
789  * msm_gem_vm_create() - Create and initialize a &msm_gem_vm
790  * @drm: the drm device
791  * @mmu: the backing MMU objects handling mapping/unmapping
792  * @name: the name of the VM
793  * @va_start: the start offset of the VA space
794  * @va_size: the size of the VA space
795  * @managed: is it a kernel managed VM?
796  *
797  * In a kernel managed VM, the kernel handles address allocation, and only
798  * synchronous operations are supported.  In a user managed VM, userspace
799  * handles virtual address allocation, and both async and sync operations
800  * are supported.
801  *
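 * Illustrative usage (the values below are hypothetical, not taken from any
 * particular caller):
 *
 *	vm = msm_gem_vm_create(drm, mmu, "gpu", SZ_16M, SZ_4G - SZ_16M, true);
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 *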
802  * Returns: pointer to the created &struct drm_gpuvm on success
803  * or an ERR_PTR(-errno) on failure.
804  */
805 struct drm_gpuvm *
806 msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name,
807 		  u64 va_start, u64 va_size, bool managed)
808 {
809 	/*
810 	 * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that
811 	 * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we lose
812 	 * tracking that an extobj is evicted) :facepalm:
813 	 */
814 	enum drm_gpuvm_flags flags = 0;
815 	struct msm_gem_vm *vm;
816 	struct drm_gem_object *dummy_gem;
817 	int ret = 0;
818 
819 	if (IS_ERR(mmu))
820 		return ERR_CAST(mmu);
821 
822 	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
823 	if (!vm)
824 		return ERR_PTR(-ENOMEM);
825 
826 	dummy_gem = drm_gpuvm_resv_object_alloc(drm);
827 	if (!dummy_gem) {
828 		ret = -ENOMEM;
829 		goto err_free_vm;
830 	}
831 
832 	if (!managed) {
833 		struct drm_sched_init_args args = {
834 			.ops = &msm_vm_bind_ops,
835 			.num_rqs = 1,
836 			.credit_limit = 1,
837 			.timeout = MAX_SCHEDULE_TIMEOUT,
838 			.name = "msm-vm-bind",
839 			.dev = drm->dev,
840 		};
841 
842 		ret = drm_sched_init(&vm->sched, &args);
843 		if (ret)
844 			goto err_free_dummy;
845 
846 		init_waitqueue_head(&vm->prealloc_throttle.wait);
847 	}
848 
849 	drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem,
850 		       va_start, va_size, 0, 0, &msm_gpuvm_ops);
851 	drm_gem_object_put(dummy_gem);
852 
853 	vm->mmu = mmu;
854 	mutex_init(&vm->mmu_lock);
855 	vm->managed = managed;
856 
857 	drm_mm_init(&vm->mm, va_start, va_size);
858 
859 	/*
860 	 * We don't really need vm log for kernel managed VMs, as the kernel
861 	 * is responsible for ensuring that GEM objs are mapped if they are
862 	 * used by a submit.  Furthermore we piggyback on mmu_lock to serialize
863 	 * access to the log.
864 	 *
865 	 * Limit the max log_shift to 8 to prevent userspace from asking us
866 	 * for an unreasonable log size.
867 	 */
868 	if (!managed)
869 		vm->log_shift = MIN(vm_log_shift, 8);
870 
871 	if (vm->log_shift) {
872 		vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]),
873 					GFP_KERNEL | __GFP_ZERO);
874 	}
875 
876 	return &vm->base;
877 
878 err_free_dummy:
879 	drm_gem_object_put(dummy_gem);
880 
881 err_free_vm:
882 	kfree(vm);
883 	return ERR_PTR(ret);
884 }
885 
886 /**
887  * msm_gem_vm_close() - Close a VM
888  * @gpuvm: The VM to close
889  *
890  * Called when the drm device file is closed, to tear down VM related resources
891  * (which will drop refcounts to GEM objects that were still mapped into the
892  * VM at the time).
893  */
894 void
895 msm_gem_vm_close(struct drm_gpuvm *gpuvm)
896 {
897 	struct msm_gem_vm *vm = to_msm_vm(gpuvm);
898 	struct drm_gpuva *vma, *tmp;
899 	struct drm_exec exec;
900 
901 	/*
902 	 * For kernel managed VMs, the VMAs are torn down when the handle is
903 	 * closed, so nothing more to do.
904 	 */
905 	if (vm->managed)
906 		return;
907 
908 	if (vm->last_fence)
909 		dma_fence_wait(vm->last_fence, false);
910 
911 	/* Kill the scheduler now, so we aren't racing with it for cleanup: */
912 	drm_sched_stop(&vm->sched, NULL);
913 	drm_sched_fini(&vm->sched);
914 
915 	/* Tear down any remaining mappings: */
916 	drm_exec_init(&exec, 0, 2);
917 	drm_exec_until_all_locked (&exec) {
918 		drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm));
919 		drm_exec_retry_on_contention(&exec);
920 
921 		drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) {
922 			struct drm_gem_object *obj = vma->gem.obj;
923 
924 			/*
925 			 * MSM_BO_NO_SHARE objects share the same resv as the
926 			 * VM, in which case the obj is already locked:
927 			 */
928 			if (obj && (obj->resv == drm_gpuvm_resv(gpuvm)))
929 				obj = NULL;
930 
931 			if (obj) {
932 				drm_exec_lock_obj(&exec, obj);
933 				drm_exec_retry_on_contention(&exec);
934 			}
935 
936 			msm_gem_vma_unmap(vma, "close");
937 			msm_gem_vma_close(vma);
938 
939 			if (obj) {
940 				drm_exec_unlock_obj(&exec, obj);
941 			}
942 		}
943 	}
944 	drm_exec_fini(&exec);
945 }
946 
947 
948 static struct msm_vm_bind_job *
949 vm_bind_job_create(struct drm_device *dev, struct drm_file *file,
950 		   struct msm_gpu_submitqueue *queue, uint32_t nr_ops)
951 {
952 	struct msm_vm_bind_job *job;
953 	int ret;
954 
955 	job = kzalloc(struct_size(job, ops, nr_ops), GFP_KERNEL | __GFP_NOWARN);
956 	if (!job)
957 		return ERR_PTR(-ENOMEM);
958 
959 	ret = drm_sched_job_init(&job->base, queue->entity, 1, queue,
960 				 file->client_id);
961 	if (ret) {
962 		kfree(job);
963 		return ERR_PTR(ret);
964 	}
965 
966 	job->vm = msm_context_vm(dev, queue->ctx);
967 	job->queue = queue;
968 	INIT_LIST_HEAD(&job->vm_ops);
969 
970 	return job;
971 }
972 
973 static bool invalid_alignment(uint64_t addr)
974 {
975 	/*
976 	 * Technically this is about GPU alignment, not CPU alignment.  But
977 	 * I've not seen any qcom SoC where the SMMU does not support the
978 	 * CPU's smallest page size.
979 	 */
980 	return !PAGE_ALIGNED(addr);
981 }
982 
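/*
 * Validate a single userspace-requested op and stash it in the job's ops
 * table.  At this point only the GEM handle is recorded; it is resolved to a
 * GEM object (with a reference held) later, in vm_bind_job_lookup_ops().
 */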
983 static int
984 lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op)
985 {
986 	struct drm_device *dev = job->vm->drm;
987 	struct msm_drm_private *priv = dev->dev_private;
988 	int i = job->nr_ops++;
989 	int ret = 0;
990 
991 	job->ops[i].op = op->op;
992 	job->ops[i].handle = op->handle;
993 	job->ops[i].obj_offset = op->obj_offset;
994 	job->ops[i].iova = op->iova;
995 	job->ops[i].range = op->range;
996 	job->ops[i].flags = op->flags;
997 
998 	if (op->flags & ~MSM_VM_BIND_OP_FLAGS)
999 		ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags);
1000 
1001 	if (invalid_alignment(op->iova))
1002 		ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova);
1003 
1004 	if (invalid_alignment(op->obj_offset))
1005 		ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset);
1006 
1007 	if (invalid_alignment(op->range))
1008 		ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range);
1009 
1010 	if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range))
1011 		ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range);
1012 
1013 	/*
1014 	 * MAP must specify a valid handle.  But the handle MBZ for
1015 	 * UNMAP or MAP_NULL.
1016 	 */
1017 	if (op->op == MSM_VM_BIND_OP_MAP) {
1018 		if (!op->handle)
1019 			ret = UERR(EINVAL, dev, "invalid handle\n");
1020 	} else if (op->handle) {
1021 		ret = UERR(EINVAL, dev, "handle must be zero\n");
1022 	}
1023 
1024 	switch (op->op) {
1025 	case MSM_VM_BIND_OP_MAP:
1026 	case MSM_VM_BIND_OP_MAP_NULL:
1027 	case MSM_VM_BIND_OP_UNMAP:
1028 		break;
1029 	default:
1030 		ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op);
1031 		break;
1032 	}
1033 
1034 	if ((op->op == MSM_VM_BIND_OP_MAP_NULL) &&
1035 	    !adreno_smmu_has_prr(priv->gpu)) {
1036 		ret = UERR(EINVAL, dev, "PRR not supported\n");
1037 	}
1038 
1039 	return ret;
1040 }
1041 
1042 /*
1043  * ioctl parsing, parameter validation, and GEM handle lookup
1044  */
1045 static int
1046 vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args,
1047 		       struct drm_file *file, int *nr_bos)
1048 {
1049 	struct drm_device *dev = job->vm->drm;
1050 	int ret = 0;
1051 	int cnt = 0;
1052 	int i = -1;
1053 
1054 	if (args->nr_ops == 1) {
1055 		/* Single op case, the op is inlined: */
1056 		ret = lookup_op(job, &args->op);
1057 	} else {
1058 		for (unsigned i = 0; i < args->nr_ops; i++) {
1059 			struct drm_msm_vm_bind_op op;
1060 			void __user *userptr =
1061 				u64_to_user_ptr(args->ops + (i * sizeof(op)));
1062 
1063 			/* Make sure we don't have garbage flags, in case we hit
1064 			 * an error path before the flags are initialized:
1065 			 */
1066 			job->ops[i].flags = 0;
1067 
1068 			if (copy_from_user(&op, userptr, sizeof(op))) {
1069 				ret = -EFAULT;
1070 				break;
1071 			}
1072 
1073 			ret = lookup_op(job, &op);
1074 			if (ret)
1075 				break;
1076 		}
1077 	}
1078 
1079 	if (ret) {
1080 		job->nr_ops = 0;
1081 		goto out;
1082 	}
1083 
1084 	spin_lock(&file->table_lock);
1085 
1086 	for (i = 0; i < args->nr_ops; i++) {
1087 		struct msm_vm_bind_op *op = &job->ops[i];
1088 		struct drm_gem_object *obj;
1089 
1090 		if (!op->handle) {
1091 			op->obj = NULL;
1092 			continue;
1093 		}
1094 
1095 		/*
1096 		 * Normally we'd use drm_gem_object_lookup(), but for bulk lookup
1097 		 * all under a single table_lock we just hit object_idr directly:
1098 		 */
1099 		obj = idr_find(&file->object_idr, op->handle);
1100 		if (!obj) {
1101 			ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", op->handle, i);
1102 			goto out_unlock;
1103 		}
1104 
1105 		drm_gem_object_get(obj);
1106 
1107 		op->obj = obj;
1108 		cnt++;
1109 
1110 		if ((op->range + op->obj_offset) > obj->size) {
1111 			ret = UERR(EINVAL, dev, "invalid range: %016llx + %016llx > %016zx\n",
1112 				   op->range, op->obj_offset, obj->size);
1113 			goto out_unlock;
1114 		}
1115 	}
1116 
1117 	*nr_bos = cnt;
1118 
1119 out_unlock:
1120 	spin_unlock(&file->table_lock);
1121 
1122 	if (ret) {
1123 		for (; i >= 0; i--) {
1124 			struct msm_vm_bind_op *op = &job->ops[i];
1125 
1126 			if (!op->obj)
1127 				continue;
1128 
1129 			drm_gem_object_put(op->obj);
1130 			op->obj = NULL;
1131 		}
1132 	}
1133 out:
1134 	return ret;
1135 }
1136 
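/*
 * Account one merged run of MAP/MAP_NULL ops, spanning first->iova through
 * last->iova + last->range, against the job's pgtable pre-allocation.
 */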
1137 static void
1138 prealloc_count(struct msm_vm_bind_job *job,
1139 	       struct msm_vm_bind_op *first,
1140 	       struct msm_vm_bind_op *last)
1141 {
1142 	struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu;
1143 
1144 	if (!first)
1145 		return;
1146 
1147 	uint64_t start_iova = first->iova;
1148 	uint64_t end_iova = last->iova + last->range;
1149 
1150 	mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova);
1151 }
1152 
1153 static bool
1154 ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next)
1155 {
1156 	/*
1157 	 * A last-level pagetable covers a 2MB range, so for the purposes of
1158 	 * figuring out how many pgtable pages to pre-allocate, two ops should
1159 	 * be merged if they land in the same 2MB range:
1160 	 */
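	/*
	 * For example, a MAP ending at 0x10040000 and another starting at
	 * 0x10050000 both land in the 2MB block beginning at 0x10000000, so
	 * they are counted as a single range for pre-allocation purposes.
	 */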
1161 	uint64_t pte_mask = ~(SZ_2M - 1);
1162 	return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask);
1163 }
1164 
1165 /*
1166  * Determine the amount of memory to prealloc for pgtables.  For sparse images,
1167  * in particular, userspace plays some tricks with the order of page mappings
1168  * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops.
1169  * So detect when multiple MAP operations are physically contiguous, and count
1170  * them as a single mapping.  Otherwise the prealloc_count() will not realize
1171  * they can share pagetable pages and vastly overcount.
1172  */
1173 static int
1174 vm_bind_prealloc_count(struct msm_vm_bind_job *job)
1175 {
1176 	struct msm_vm_bind_op *first = NULL, *last = NULL;
1177 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
1178 	int ret;
1179 
1180 	for (int i = 0; i < job->nr_ops; i++) {
1181 		struct msm_vm_bind_op *op = &job->ops[i];
1182 
1183 		/* We only care about MAP/MAP_NULL: */
1184 		if (op->op == MSM_VM_BIND_OP_UNMAP)
1185 			continue;
1186 
1187 		/*
1188 		 * If op is contiguous with last in the current range, then
1189 		 * it becomes the new last in the range and we continue
1190 		 * looping:
1191 		 */
1192 		if (last && ops_are_same_pte(last, op)) {
1193 			last = op;
1194 			continue;
1195 		}
1196 
1197 		/*
1198 		 * If op is not contiguous with the current range, flush
1199 		 * the current range and start anew:
1200 		 */
1201 		prealloc_count(job, first, last);
1202 		first = last = op;
1203 	}
1204 
1205 	/* Flush the remaining range: */
1206 	prealloc_count(job, first, last);
1207 
1208 	/*
1209 	 * Now that we know the needed amount to pre-alloc, throttle on pending
1210 	 * VM_BIND jobs if we already have too much pre-alloc memory in flight
1211 	 */
1212 	ret = wait_event_interruptible(
1213 			vm->prealloc_throttle.wait,
1214 			atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
1215 	if (ret)
1216 		return ret;
1217 
1218 	atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);
1219 
1220 	return 0;
1221 }
1222 
1223 /*
1224  * Lock VM and GEM objects
1225  */
1226 static int
1227 vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec)
1228 {
1229 	int ret;
1230 
1231 	/* Lock VM and objects: */
1232 	drm_exec_until_all_locked (exec) {
1233 		ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm));
1234 		drm_exec_retry_on_contention(exec);
1235 		if (ret)
1236 			return ret;
1237 
1238 		for (unsigned i = 0; i < job->nr_ops; i++) {
1239 			const struct msm_vm_bind_op *op = &job->ops[i];
1240 
1241 			switch (op->op) {
1242 			case MSM_VM_BIND_OP_UNMAP:
1243 				ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec,
1244 							      op->iova,
1245 							      op->obj_offset);
1246 				break;
1247 			case MSM_VM_BIND_OP_MAP:
1248 			case MSM_VM_BIND_OP_MAP_NULL: {
1249 				struct drm_gpuvm_map_req map_req = {
1250 					.map.va.addr = op->iova,
1251 					.map.va.range = op->range,
1252 					.map.gem.obj = op->obj,
1253 					.map.gem.offset = op->obj_offset,
1254 				};
1255 
1256 				ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, &map_req);
1257 				break;
1258 			}
1259 			default:
1260 				/*
1261 				 * lookup_op() should have already thrown an error for
1262 				 * invalid ops
1263 				 */
1264 				WARN_ON("unreachable");
1265 			}
1266 
1267 			drm_exec_retry_on_contention(exec);
1268 			if (ret)
1269 				return ret;
1270 		}
1271 	}
1272 
1273 	return 0;
1274 }
1275 
1276 /*
1277  * Pin GEM objects, ensuring that we have backing pages.  Pinning will move
1278  * the object to the pinned LRU so that the shrinker knows to first consider
1279  * other objects for evicting.
1280  */
1281 static int
1282 vm_bind_job_pin_objects(struct msm_vm_bind_job *job)
1283 {
1284 	struct drm_gem_object *obj;
1285 
1286 	/*
1287 	 * The first loop, done before taking the LRU lock, triggers any
1288 	 * needed get_pages() so that it does not happen while the LRU lock
1289 	 * is held (see the comment before the second loop below).
1290 	 */
1291 	job_foreach_bo (obj, job) {
1292 		struct page **pages;
1293 
1294 		pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED);
1295 		if (IS_ERR(pages))
1296 			return PTR_ERR(pages);
1297 	}
1298 
1299 	struct msm_drm_private *priv = job->vm->drm->dev_private;
1300 
1301 	/*
1302 	 * A second loop while holding the LRU lock (a) avoids acquiring/
1303 	 * dropping the LRU lock for each individual bo, while (b) ensuring
1304 	 * that nothing which could trigger get_pages() (and in turn reclaim)
1305 	 * runs while the LRU lock is held, which could deadlock against the
1306 	 * shrinker.  The pages were already obtained in the first loop above.
1307 	 */
1308 	mutex_lock(&priv->lru.lock);
1309 	job_foreach_bo (obj, job)
1310 		msm_gem_pin_obj_locked(obj);
1311 	mutex_unlock(&priv->lru.lock);
1312 
1313 	job->bos_pinned = true;
1314 
1315 	return 0;
1316 }
1317 
1318 /*
1319  * Unpin GEM objects.  Normally this is done after the bind job is run.
1320  */
1321 static void
1322 vm_bind_job_unpin_objects(struct msm_vm_bind_job *job)
1323 {
1324 	struct drm_gem_object *obj;
1325 
1326 	if (!job->bos_pinned)
1327 		return;
1328 
1329 	job_foreach_bo (obj, job)
1330 		msm_gem_unpin_locked(obj);
1331 
1332 	job->bos_pinned = false;
1333 }
1334 
1335 /*
1336  * Pre-allocate pgtable memory, and translate the VM bind requests into a
1337  * sequence of pgtable updates to be applied asynchronously.
1338  */
1339 static int
1340 vm_bind_job_prepare(struct msm_vm_bind_job *job)
1341 {
1342 	struct msm_gem_vm *vm = to_msm_vm(job->vm);
1343 	struct msm_mmu *mmu = vm->mmu;
1344 	int ret;
1345 
1346 	ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc);
1347 	if (ret)
1348 		return ret;
1349 
1350 	for (unsigned i = 0; i < job->nr_ops; i++) {
1351 		const struct msm_vm_bind_op *op = &job->ops[i];
1352 		struct op_arg arg = {
1353 			.job = job,
1354 			.op = op,
1355 		};
1356 
1357 		switch (op->op) {
1358 		case MSM_VM_BIND_OP_UNMAP:
1359 			ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova,
1360 						 op->range);
1361 			break;
1362 		case MSM_VM_BIND_OP_MAP:
1363 			if (op->flags & MSM_VM_BIND_OP_DUMP)
1364 				arg.flags |= MSM_VMA_DUMP;
1365 			fallthrough;
1366 		case MSM_VM_BIND_OP_MAP_NULL: {
1367 			struct drm_gpuvm_map_req map_req = {
1368 				.map.va.addr = op->iova,
1369 				.map.va.range = op->range,
1370 				.map.gem.obj = op->obj,
1371 				.map.gem.offset = op->obj_offset,
1372 			};
1373 
1374 			ret = drm_gpuvm_sm_map(job->vm, &arg, &map_req);
1375 			break;
1376 		}
1377 		default:
1378 			/*
1379 			 * lookup_op() should have already thrown an error for
1380 			 * invalid ops
1381 			 */
1382 			BUG_ON("unreachable");
1383 		}
1384 
1385 		if (ret) {
1386 			/*
1387 			 * If we've already started modifying the vm, we can't
1388 			 * adequately describe to userspace the intermediate
1389 			 * state the vm is in.  So throw up our hands!
1390 			 */
1391 			if (i > 0)
1392 				msm_gem_vm_unusable(job->vm);
1393 			return ret;
1394 		}
1395 	}
1396 
1397 	return 0;
1398 }
1399 
1400 /*
1401  * Attach fences to the GEM objects being bound.  This will signify to
1402  * the shrinker that they are busy even after dropping the locks (ie.
1403  * drm_exec_fini())
1404  */
1405 static void
1406 vm_bind_job_attach_fences(struct msm_vm_bind_job *job)
1407 {
1408 	for (unsigned i = 0; i < job->nr_ops; i++) {
1409 		struct drm_gem_object *obj = job->ops[i].obj;
1410 
1411 		if (!obj)
1412 			continue;
1413 
1414 		dma_resv_add_fence(obj->resv, job->fence,
1415 				   DMA_RESV_USAGE_KERNEL);
1416 	}
1417 }
1418 
1419 int
1420 msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file)
1421 {
1422 	struct msm_drm_private *priv = dev->dev_private;
1423 	struct drm_msm_vm_bind *args = data;
1424 	struct msm_context *ctx = file->driver_priv;
1425 	struct msm_vm_bind_job *job = NULL;
1426 	struct msm_gpu *gpu = priv->gpu;
1427 	struct msm_gpu_submitqueue *queue;
1428 	struct msm_syncobj_post_dep *post_deps = NULL;
1429 	struct drm_syncobj **syncobjs_to_reset = NULL;
1430 	struct sync_file *sync_file = NULL;
1431 	struct dma_fence *fence;
1432 	int out_fence_fd = -1;
1433 	int ret, nr_bos = 0;
1434 	unsigned i;
1435 
1436 	if (!gpu)
1437 		return -ENXIO;
1438 
1439 	/*
1440 	 * Maybe we could allow just UNMAP ops?  OTOH userspace should just
1441 	 * immediately close the device file and all will be torn down.
1442 	 */
1443 	if (to_msm_vm(msm_context_vm(dev, ctx))->unusable)
1444 		return UERR(EPIPE, dev, "context is unusable");
1445 
1446 	/*
1447 	 * Technically, you cannot create a VM_BIND submitqueue in the first
1448 	 * place, if you haven't opted in to VM_BIND context.  But it is
1449 	 * cleaner / less confusing, to check this case directly.
1450 	 */
1451 	if (!msm_context_is_vmbind(ctx))
1452 		return UERR(EINVAL, dev, "context does not support vmbind");
1453 
1454 	if (args->flags & ~MSM_VM_BIND_FLAGS)
1455 		return UERR(EINVAL, dev, "invalid flags");
1456 
1457 	queue = msm_submitqueue_get(ctx, args->queue_id);
1458 	if (!queue)
1459 		return -ENOENT;
1460 
1461 	if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) {
1462 		ret = UERR(EINVAL, dev, "Invalid queue type");
1463 		goto out_post_unlock;
1464 	}
1465 
1466 	if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
1467 		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
1468 		if (out_fence_fd < 0) {
1469 			ret = out_fence_fd;
1470 			goto out_post_unlock;
1471 		}
1472 	}
1473 
1474 	job = vm_bind_job_create(dev, file, queue, args->nr_ops);
1475 	if (IS_ERR(job)) {
1476 		ret = PTR_ERR(job);
1477 		goto out_post_unlock;
1478 	}
1479 
1480 	ret = mutex_lock_interruptible(&queue->lock);
1481 	if (ret)
1482 		goto out_post_unlock;
1483 
1484 	if (args->flags & MSM_VM_BIND_FENCE_FD_IN) {
1485 		struct dma_fence *in_fence;
1486 
1487 		in_fence = sync_file_get_fence(args->fence_fd);
1488 
1489 		if (!in_fence) {
1490 			ret = UERR(EINVAL, dev, "invalid in-fence");
1491 			goto out_unlock;
1492 		}
1493 
1494 		ret = drm_sched_job_add_dependency(&job->base, in_fence);
1495 		if (ret)
1496 			goto out_unlock;
1497 	}
1498 
1499 	if (args->in_syncobjs > 0) {
1500 		syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base,
1501 							   file, args->in_syncobjs,
1502 							   args->nr_in_syncobjs,
1503 							   args->syncobj_stride);
1504 		if (IS_ERR(syncobjs_to_reset)) {
1505 			ret = PTR_ERR(syncobjs_to_reset);
1506 			goto out_unlock;
1507 		}
1508 	}
1509 
1510 	if (args->out_syncobjs > 0) {
1511 		post_deps = msm_syncobj_parse_post_deps(dev, file,
1512 							args->out_syncobjs,
1513 							args->nr_out_syncobjs,
1514 							args->syncobj_stride);
1515 		if (IS_ERR(post_deps)) {
1516 			ret = PTR_ERR(post_deps);
1517 			goto out_unlock;
1518 		}
1519 	}
1520 
1521 	ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos);
1522 	if (ret)
1523 		goto out_unlock;
1524 
1525 	ret = vm_bind_prealloc_count(job);
1526 	if (ret)
1527 		goto out_unlock;
1528 
1529 	struct drm_exec exec;
1530 	unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT;
1531 	drm_exec_init(&exec, flags, nr_bos + 1);
1532 
1533 	ret = vm_bind_job_lock_objects(job, &exec);
1534 	if (ret)
1535 		goto out;
1536 
1537 	ret = vm_bind_job_pin_objects(job);
1538 	if (ret)
1539 		goto out;
1540 
1541 	ret = vm_bind_job_prepare(job);
1542 	if (ret)
1543 		goto out;
1544 
1545 	drm_sched_job_arm(&job->base);
1546 
1547 	job->fence = dma_fence_get(&job->base.s_fence->finished);
1548 
1549 	if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) {
1550 		sync_file = sync_file_create(job->fence);
1551 		if (!sync_file)
1552 			ret = -ENOMEM;
1553 	}
1554 
1555 	if (ret)
1556 		goto out;
1557 
1558 	vm_bind_job_attach_fences(job);
1559 
1560 	/*
1561 	 * The job can be free'd (and fence unref'd) at any point after
1562 	 * drm_sched_entity_push_job(), so we need to hold our own ref
1563 	 */
1564 	fence = dma_fence_get(job->fence);
1565 
1566 	drm_sched_entity_push_job(&job->base);
1567 
1568 	msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs);
1569 	msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence);
1570 
1571 	dma_fence_put(fence);
1572 
1573 out:
1574 	if (ret)
1575 		vm_bind_job_unpin_objects(job);
1576 
1577 	drm_exec_fini(&exec);
1578 out_unlock:
1579 	mutex_unlock(&queue->lock);
1580 out_post_unlock:
1581 	if (ret) {
1582 		if (out_fence_fd >= 0)
1583 			put_unused_fd(out_fence_fd);
1584 		if (sync_file)
1585 			fput(sync_file->file);
1586 	} else if (sync_file) {
1587 		fd_install(out_fence_fd, sync_file->file);
1588 		args->fence_fd = out_fence_fd;
1589 	}
1590 
1591 	if (!IS_ERR_OR_NULL(job)) {
1592 		if (ret)
1593 			msm_vma_job_free(&job->base);
1594 	} else {
1595 		/*
1596 		 * If the job hasn't yet taken ownership of the queue
1597 		 * then we need to drop the reference ourselves:
1598 		 */
1599 		msm_submitqueue_put(queue);
1600 	}
1601 
1602 	if (!IS_ERR_OR_NULL(post_deps)) {
1603 		for (i = 0; i < args->nr_out_syncobjs; ++i) {
1604 			kfree(post_deps[i].chain);
1605 			drm_syncobj_put(post_deps[i].syncobj);
1606 		}
1607 		kfree(post_deps);
1608 	}
1609 
1610 	if (!IS_ERR_OR_NULL(syncobjs_to_reset)) {
1611 		for (i = 0; i < args->nr_in_syncobjs; ++i) {
1612 			if (syncobjs_to_reset[i])
1613 				drm_syncobj_put(syncobjs_to_reset[i]);
1614 		}
1615 		kfree(syncobjs_to_reset);
1616 	}
1617 
1618 	return ret;
1619 }
1620