xref: /linux/drivers/gpu/drm/imagination/pvr_vm.c (revision ab779466166348eecf17d20f620aa9a47965c934)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_vm.h"
5 
6 #include "pvr_device.h"
7 #include "pvr_drv.h"
8 #include "pvr_gem.h"
9 #include "pvr_mmu.h"
10 #include "pvr_rogue_fwif.h"
11 #include "pvr_rogue_heap_config.h"
12 
13 #include <drm/drm_exec.h>
14 #include <drm/drm_gem.h>
15 #include <drm/drm_gpuvm.h>
16 
17 #include <linux/container_of.h>
18 #include <linux/err.h>
19 #include <linux/errno.h>
20 #include <linux/gfp_types.h>
21 #include <linux/kref.h>
22 #include <linux/mutex.h>
23 #include <linux/stddef.h>
24 
25 /**
26  * DOC: Memory context
27  *
28  * This is the "top level" datatype in the VM code. It's exposed in the public
29  * API as an opaque handle.
30  */
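
/*
 * Illustrative sketch (not part of the driver): the expected lifecycle of a
 * VM context as seen from a caller. Error handling is trimmed and "pvr_dev"
 * is assumed to be a caller-held &struct pvr_device pointer.
 *
 *	struct pvr_vm_context *vm_ctx;
 *
 *	vm_ctx = pvr_vm_create_context(pvr_dev, true);
 *	if (IS_ERR(vm_ctx))
 *		return PTR_ERR(vm_ctx);
 *
 *	... map and unmap memory with pvr_vm_map()/pvr_vm_unmap() ...
 *
 *	pvr_vm_context_put(vm_ctx);
 */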
31 
32 /**
33  * struct pvr_vm_context - Context type used to represent a single VM.
34  */
35 struct pvr_vm_context {
36 	/**
37 	 * @pvr_dev: The PowerVR device to which this context is bound.
38 	 * This binding is immutable for the life of the context.
39 	 */
40 	struct pvr_device *pvr_dev;
41 
42 	/** @mmu_ctx: The context for binding to physical memory. */
43 	struct pvr_mmu_context *mmu_ctx;
44 
	/** @gpuvm_mgr: GPUVA manager object associated with this context. */
46 	struct drm_gpuvm gpuvm_mgr;
47 
48 	/** @lock: Global lock on this VM. */
49 	struct mutex lock;
50 
51 	/**
52 	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
53 	 * context.
54 	 */
55 	struct pvr_fw_object *fw_mem_ctx_obj;
56 
57 	/** @ref_count: Reference count of object. */
58 	struct kref ref_count;
59 
60 	/**
61 	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
62 	 * should use the @dummy_gem.resv and not their own _resv field.
63 	 */
64 	struct drm_gem_object dummy_gem;
65 };
66 
67 struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
68 {
69 	if (vm_ctx)
70 		kref_get(&vm_ctx->ref_count);
71 
72 	return vm_ctx;
73 }
74 
75 /**
76  * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
77  *                                     page table structure behind a VM context.
 * @vm_ctx: Target VM context.
 *
 * Return: The DMA address of the root of the page table structure.
 */
80 dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
81 {
82 	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
83 }
84 
85 /**
86  * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
87  * @vm_ctx: Target VM context.
88  *
89  * This is used to allow private BOs to share a dma_resv for faster fence
90  * updates.
91  *
92  * Returns: The dma_resv pointer.
93  */
94 struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
95 {
96 	return vm_ctx->dummy_gem.resv;
97 }
98 
99 /**
100  * DOC: Memory mappings
101  */
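
/*
 * Illustrative sketch (not part of the driver): creating and destroying a
 * mapping with the helpers defined later in this file. "pvr_obj" is assumed
 * to be a caller-held object reference. No handle is returned for a mapping;
 * the device-virtual address itself is the handle, so the caller must
 * remember it in order to unmap.
 *
 *	int err;
 *
 *	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
 *	if (err)
 *		return err;
 *
 *	... use the mapping ...
 *
 *	err = pvr_vm_unmap(vm_ctx, device_addr, size);
 */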
102 
103 /**
104  * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
105  */
106 struct pvr_vm_gpuva {
107 	/** @base: The wrapped drm_gpuva object. */
108 	struct drm_gpuva base;
109 };
110 
111 static __always_inline
112 struct pvr_vm_gpuva *to_pvr_vm_gpuva(struct drm_gpuva *gpuva)
113 {
114 	return container_of(gpuva, struct pvr_vm_gpuva, base);
115 }
116 
117 enum pvr_vm_bind_type {
118 	PVR_VM_BIND_TYPE_MAP,
119 	PVR_VM_BIND_TYPE_UNMAP,
120 };
121 
122 /**
123  * struct pvr_vm_bind_op - Context of a map/unmap operation.
124  */
125 struct pvr_vm_bind_op {
126 	/** @type: Map or unmap. */
127 	enum pvr_vm_bind_type type;
128 
129 	/** @pvr_obj: Object associated with mapping (map only). */
130 	struct pvr_gem_object *pvr_obj;
131 
132 	/**
133 	 * @vm_ctx: VM context where the mapping will be created or destroyed.
134 	 */
135 	struct pvr_vm_context *vm_ctx;
136 
137 	/** @mmu_op_ctx: MMU op context. */
138 	struct pvr_mmu_op_context *mmu_op_ctx;
139 
140 	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
141 	struct drm_gpuvm_bo *gpuvm_bo;
142 
143 	/**
144 	 * @new_va: Prealloced VA mapping object (init in callback).
145 	 * Used when creating a mapping.
146 	 */
147 	struct pvr_vm_gpuva *new_va;
148 
149 	/**
150 	 * @prev_va: Prealloced VA mapping object (init in callback).
151 	 * Used when a mapping or unmapping operation overlaps an existing
152 	 * mapping and splits away the beginning into a new mapping.
153 	 */
154 	struct pvr_vm_gpuva *prev_va;
155 
156 	/**
157 	 * @next_va: Prealloced VA mapping object (init in callback).
158 	 * Used when a mapping or unmapping operation overlaps an existing
159 	 * mapping and splits away the end into a new mapping.
160 	 */
161 	struct pvr_vm_gpuva *next_va;
162 
163 	/** @offset: Offset into @pvr_obj to begin mapping from. */
164 	u64 offset;
165 
166 	/** @device_addr: Device-virtual address at the start of the mapping. */
167 	u64 device_addr;
168 
169 	/** @size: Size of the desired mapping. */
170 	u64 size;
171 };
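
/*
 * Worked example (illustrative; a 4K device page size is assumed): if
 * [0x10000, 0x18000) is already mapped and an unmap of [0x12000, 0x14000) is
 * requested, the existing mapping is split. @prev_va backs the surviving
 * front portion [0x10000, 0x12000), @next_va backs the surviving tail
 * [0x14000, 0x18000), and @new_va is only consumed by map operations.
 */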
172 
173 /**
174  * pvr_vm_bind_op_exec() - Execute a single bind op.
175  * @bind_op: Bind op context.
176  *
177  * Returns:
178  *  * 0 on success,
179  *  * Any error code returned by drm_gpuva_sm_map(), drm_gpuva_sm_unmap(), or
180  *    a callback function.
181  */
182 static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
183 {
184 	switch (bind_op->type) {
185 	case PVR_VM_BIND_TYPE_MAP:
186 		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
187 					bind_op, bind_op->device_addr,
188 					bind_op->size,
189 					gem_from_pvr_gem(bind_op->pvr_obj),
190 					bind_op->offset);
191 
192 	case PVR_VM_BIND_TYPE_UNMAP:
193 		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
194 					  bind_op, bind_op->device_addr,
195 					  bind_op->size);
196 	}
197 
198 	/*
	 * This shouldn't happen: both bind types are handled by the switch
	 * above, so reaching this point means bind_op->type holds an
	 * invalid value.
201 	 */
202 	WARN_ON(1);
203 	return -EINVAL;
204 }
205 
206 static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
207 {
208 	drm_gpuvm_bo_put(bind_op->gpuvm_bo);
209 
210 	kfree(bind_op->new_va);
211 	kfree(bind_op->prev_va);
212 	kfree(bind_op->next_va);
213 
214 	if (bind_op->pvr_obj)
215 		pvr_gem_object_put(bind_op->pvr_obj);
216 
217 	if (bind_op->mmu_op_ctx)
218 		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
219 }
220 
221 static int
222 pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
223 			struct pvr_vm_context *vm_ctx,
224 			struct pvr_gem_object *pvr_obj, u64 offset,
225 			u64 device_addr, u64 size)
226 {
	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
228 	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
229 	struct sg_table *sgt;
230 	u64 offset_plus_size;
231 	int err;
232 
233 	if (check_add_overflow(offset, size, &offset_plus_size))
234 		return -EINVAL;
235 
236 	if (is_user &&
237 	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
238 		return -EINVAL;
239 	}
240 
241 	if (!pvr_device_addr_and_size_are_valid(device_addr, size) ||
242 	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
243 	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
244 		return -EINVAL;
245 
246 	bind_op->type = PVR_VM_BIND_TYPE_MAP;
247 
248 	bind_op->gpuvm_bo = drm_gpuvm_bo_create(&vm_ctx->gpuvm_mgr,
249 						gem_from_pvr_gem(pvr_obj));
250 	if (!bind_op->gpuvm_bo)
251 		return -ENOMEM;
252 
253 	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
254 	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
255 	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
256 	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
257 		err = -ENOMEM;
258 		goto err_bind_op_fini;
259 	}
260 
261 	/* Pin pages so they're ready for use. */
262 	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
263 	err = PTR_ERR_OR_ZERO(sgt);
264 	if (err)
265 		goto err_bind_op_fini;
266 
267 	bind_op->mmu_op_ctx =
268 		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
269 	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
270 	if (err) {
271 		bind_op->mmu_op_ctx = NULL;
272 		goto err_bind_op_fini;
273 	}
274 
275 	bind_op->pvr_obj = pvr_obj;
276 	bind_op->vm_ctx = vm_ctx;
277 	bind_op->device_addr = device_addr;
278 	bind_op->size = size;
279 	bind_op->offset = offset;
280 
281 	return 0;
282 
283 err_bind_op_fini:
284 	pvr_vm_bind_op_fini(bind_op);
285 
286 	return err;
287 }
288 
289 static int
290 pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
291 			  struct pvr_vm_context *vm_ctx, u64 device_addr,
292 			  u64 size)
293 {
294 	int err;
295 
296 	if (!pvr_device_addr_and_size_are_valid(device_addr, size))
297 		return -EINVAL;
298 
299 	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
300 
301 	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
302 	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
303 	if (!bind_op->prev_va || !bind_op->next_va) {
304 		err = -ENOMEM;
305 		goto err_bind_op_fini;
306 	}
307 
308 	bind_op->mmu_op_ctx =
309 		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
310 	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
311 	if (err) {
312 		bind_op->mmu_op_ctx = NULL;
313 		goto err_bind_op_fini;
314 	}
315 
316 	bind_op->vm_ctx = vm_ctx;
317 	bind_op->device_addr = device_addr;
318 	bind_op->size = size;
319 
320 	return 0;
321 
322 err_bind_op_fini:
323 	pvr_vm_bind_op_fini(bind_op);
324 
325 	return err;
326 }
327 
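/*
 * Lock the reservation objects touched by a bind operation: the VM context's
 * dummy GEM (whose resv is shared by all private BOs), every external BO
 * currently attached to the gpuvm, and, for map operations, the GEM being
 * mapped. drm_exec_until_all_locked() transparently drops all locks taken so
 * far and retries from the top whenever drm_exec_retry_on_contention()
 * observes contention, so on successful exit every object is locked.
 */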
328 static int
329 pvr_vm_bind_op_lock_resvs(struct drm_exec *exec, struct pvr_vm_bind_op *bind_op)
330 {
331 	drm_exec_until_all_locked(exec) {
332 		struct drm_gem_object *r_obj = &bind_op->vm_ctx->dummy_gem;
333 		struct drm_gpuvm *gpuvm = &bind_op->vm_ctx->gpuvm_mgr;
334 		struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
335 		struct drm_gpuvm_bo *gpuvm_bo;
336 
		/* Acquire lock on the vm_context's reservation object. */
338 		int err = drm_exec_lock_obj(exec, r_obj);
339 
340 		drm_exec_retry_on_contention(exec);
341 		if (err)
342 			return err;
343 
344 		/* Acquire lock on all BOs in the context. */
345 		list_for_each_entry(gpuvm_bo, &gpuvm->extobj.list,
346 				    list.entry.extobj) {
347 			err = drm_exec_lock_obj(exec, gpuvm_bo->obj);
348 
349 			drm_exec_retry_on_contention(exec);
350 			if (err)
351 				return err;
352 		}
353 
354 		/* Unmap operations don't have an object to lock. */
355 		if (!pvr_obj)
356 			break;
357 
358 		/* Acquire lock on the GEM being mapped. */
359 		err = drm_exec_lock_obj(exec,
360 					gem_from_pvr_gem(bind_op->pvr_obj));
361 
362 		drm_exec_retry_on_contention(exec);
363 		if (err)
364 			return err;
365 	}
366 
367 	return 0;
368 }
369 
370 /**
371  * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
 * @op: gpuva op containing the mapping details.
373  * @op_ctx: Operation context.
374  *
375  * Context: Called by drm_gpuvm_sm_map following a successful mapping while
376  * @op_ctx.vm_ctx mutex is held.
377  *
378  * Return:
379  *  * 0 on success, or
380  *  * Any error returned by pvr_mmu_map().
381  */
382 static int
383 pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
384 {
385 	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
386 	struct pvr_vm_bind_op *ctx = op_ctx;
387 	int err;
388 
389 	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
390 		return -EINVAL;
391 
392 	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
393 			  op->map.va.addr);
394 	if (err)
395 		return err;
396 
397 	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
398 	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
399 	ctx->new_va = NULL;
400 
401 	return 0;
402 }
403 
404 /**
405  * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
406  * @op: gpuva op containing the unmap details.
407  * @op_ctx: Operation context.
408  *
409  * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
410  * @op_ctx.vm_ctx mutex is held.
411  *
412  * Return:
413  *  * 0 on success, or
414  *  * Any error returned by pvr_mmu_unmap().
415  */
416 static int
417 pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
418 {
419 	struct pvr_vm_bind_op *ctx = op_ctx;
420 
421 	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
422 				op->unmap.va->va.range);
423 
424 	if (err)
425 		return err;
426 
427 	drm_gpuva_unmap(&op->unmap);
428 	drm_gpuva_unlink(op->unmap.va);
429 
430 	return 0;
431 }
432 
433 /**
434  * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
435  * @op: gpuva op containing the remap details.
436  * @op_ctx: Operation context.
437  *
438  * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
439  * mapping or unmapping operation causes a region to be split. The
440  * @op_ctx.vm_ctx mutex is held.
441  *
442  * Return:
443  *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
445  */
446 static int
447 pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
448 {
449 	struct pvr_vm_bind_op *ctx = op_ctx;
450 	u64 va_start = 0, va_range = 0;
451 	int err;
452 
453 	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
454 	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
455 	if (err)
456 		return err;
457 
458 	/* No actual remap required: the page table tree depth is fixed to 3,
459 	 * and we use 4k page table entries only for now.
460 	 */
461 	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);
462 
463 	if (op->remap.prev) {
464 		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
465 		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
466 		ctx->prev_va = NULL;
467 	}
468 
469 	if (op->remap.next) {
470 		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
471 		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
472 		ctx->next_va = NULL;
473 	}
474 
475 	drm_gpuva_unlink(op->remap.unmap->va);
476 
477 	return 0;
478 }
479 
480 /*
481  * Public API
482  *
483  * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
484  */
485 
486 /**
487  * pvr_device_addr_is_valid() - Tests whether a device-virtual address
488  *                              is valid.
489  * @device_addr: Virtual device address to test.
490  *
491  * Return:
492  *  * %true if @device_addr is within the valid range for a device page
493  *    table and is aligned to the device page size, or
494  *  * %false otherwise.
495  */
496 bool
497 pvr_device_addr_is_valid(u64 device_addr)
498 {
499 	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
500 	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
501 }
502 
503 /**
504  * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
505  * address and associated size are both valid.
506  * @device_addr: Virtual device address to test.
507  * @size: Size of the range based at @device_addr to test.
508  *
509  * Calling pvr_device_addr_is_valid() twice (once on @size, and again on
510  * @device_addr + @size) to verify a device-virtual address range initially
511  * seems intuitive, but it produces a false-negative when the address range
512  * is right at the end of device-virtual address space.
513  *
514  * This function catches that corner case, as well as checking that
515  * @size is non-zero.
516  *
517  * Return:
518  *  * %true if @device_addr is device page aligned; @size is device page
519  *    aligned; the range specified by @device_addr and @size is within the
520  *    bounds of the device-virtual address space, and @size is non-zero, or
521  *  * %false otherwise.
522  */
523 bool
524 pvr_device_addr_and_size_are_valid(u64 device_addr, u64 size)
525 {
526 	return pvr_device_addr_is_valid(device_addr) &&
527 	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
528 	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
529 }
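
/*
 * Worked example of the corner case described above (illustrative; the exact
 * constants depend on the device-virtual address space size and the device
 * page size): a range starting one device page below
 * PVR_PAGE_TABLE_ADDR_SPACE_SIZE with a size of one device page ends exactly
 * at the top of the address space and is accepted by the check above, whereas
 * passing @device_addr + @size to pvr_device_addr_is_valid() would wrongly
 * reject it, since that address lies outside PVR_PAGE_TABLE_ADDR_MASK.
 */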
530 
static void
pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
{
}
535 
536 static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
537 	.vm_free = pvr_gpuvm_free,
538 	.sm_step_map = pvr_vm_gpuva_map,
539 	.sm_step_remap = pvr_vm_gpuva_remap,
540 	.sm_step_unmap = pvr_vm_gpuva_unmap,
541 };
542 
543 static void
544 fw_mem_context_init(void *cpu_ptr, void *priv)
545 {
546 	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
547 	struct pvr_vm_context *vm_ctx = priv;
548 
549 	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
550 	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
551 }
552 
553 /**
554  * pvr_vm_create_context() - Create a new VM context.
555  * @pvr_dev: Target PowerVR device.
556  * @is_userspace_context: %true if this context is for userspace. This will
557  *                        create a firmware memory context for the VM context
558  *                        and disable warnings when tearing down mappings.
559  *
560  * Return:
561  *  * A handle to the newly-minted VM context on success,
562  *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
563  *    missing or has an unsupported value,
564  *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
565  *    or
566  *  * Any error encountered while setting up internal structures.
567  */
568 struct pvr_vm_context *
569 pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
570 {
571 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
572 
573 	struct pvr_vm_context *vm_ctx;
574 	u16 device_addr_bits;
575 
576 	int err;
577 
578 	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
579 				&device_addr_bits);
580 	if (err) {
581 		drm_err(drm_dev,
582 			"Failed to get device virtual address space bits\n");
583 		return ERR_PTR(err);
584 	}
585 
586 	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
587 		drm_err(drm_dev,
588 			"Device has unsupported virtual address space size\n");
589 		return ERR_PTR(-EINVAL);
590 	}
591 
592 	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
593 	if (!vm_ctx)
594 		return ERR_PTR(-ENOMEM);
595 
596 	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
597 
598 	vm_ctx->pvr_dev = pvr_dev;
599 	kref_init(&vm_ctx->ref_count);
600 	mutex_init(&vm_ctx->lock);
601 
602 	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
603 		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
604 		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
605 		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
606 
607 	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
609 	if (err) {
610 		vm_ctx->mmu_ctx = NULL;
611 		goto err_put_ctx;
612 	}
613 
614 	if (is_userspace_context) {
615 		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
616 					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
617 					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
618 
619 		if (err)
620 			goto err_page_table_destroy;
621 	}
622 
623 	return vm_ctx;
624 
625 err_page_table_destroy:
626 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
627 
628 err_put_ctx:
629 	pvr_vm_context_put(vm_ctx);
630 
631 	return ERR_PTR(err);
632 }
633 
634 /**
635  * pvr_vm_context_release() - Teardown a VM context.
636  * @ref_count: Pointer to reference counter of the VM context.
637  *
638  * This function ensures that no mappings are left dangling by unmapping them
639  * all in order of ascending device-virtual address.
640  */
641 static void
642 pvr_vm_context_release(struct kref *ref_count)
643 {
644 	struct pvr_vm_context *vm_ctx =
645 		container_of(ref_count, struct pvr_vm_context, ref_count);
646 
647 	if (vm_ctx->fw_mem_ctx_obj)
648 		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
649 
650 	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
651 			     vm_ctx->gpuvm_mgr.mm_range));
652 
653 	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
654 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
655 	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
656 	mutex_destroy(&vm_ctx->lock);
657 
658 	kfree(vm_ctx);
659 }
660 
661 /**
662  * pvr_vm_context_lookup() - Look up VM context from handle
663  * @pvr_file: Pointer to pvr_file structure.
664  * @handle: Object handle.
665  *
666  * Takes reference on VM context object. Call pvr_vm_context_put() to release.
667  *
668  * Returns:
669  *  * The requested object on success, or
670  *  * %NULL on failure (object does not exist in list, or is not a VM context)
671  */
672 struct pvr_vm_context *
673 pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
674 {
675 	struct pvr_vm_context *vm_ctx;
676 
677 	xa_lock(&pvr_file->vm_ctx_handles);
678 	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
679 	if (vm_ctx)
680 		kref_get(&vm_ctx->ref_count);
681 
682 	xa_unlock(&pvr_file->vm_ctx_handles);
683 
684 	return vm_ctx;
685 }
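
/*
 * Illustrative sketch (not part of the driver): resolving a userspace handle
 * to a VM context and releasing the reference when done. "pvr_file" and
 * "args->vm_context_handle" are assumed caller-side names.
 *
 *	struct pvr_vm_context *vm_ctx;
 *
 *	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
 *	if (!vm_ctx)
 *		return -EINVAL;
 *
 *	... use vm_ctx ...
 *
 *	pvr_vm_context_put(vm_ctx);
 */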
686 
687 /**
688  * pvr_vm_context_put() - Release a reference on a VM context
689  * @vm_ctx: Target VM context.
690  *
691  * Returns:
692  *  * %true if the VM context was destroyed, or
693  *  * %false if there are any references still remaining.
694  */
695 bool
696 pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
697 {
698 	if (vm_ctx)
699 		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);
700 
701 	return true;
702 }
703 
704 /**
 * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated
 * with the given file.
707  * @pvr_file: Pointer to pvr_file structure.
708  *
709  * Removes all vm_contexts associated with @pvr_file from the device VM context
710  * list and drops initial references. vm_contexts will then be destroyed once
711  * all outstanding references are dropped.
712  */
713 void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
714 {
715 	struct pvr_vm_context *vm_ctx;
716 	unsigned long handle;
717 
718 	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
719 		/* vm_ctx is not used here because that would create a race with xa_erase */
720 		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
721 	}
722 }
723 
724 /**
725  * pvr_vm_map() - Map a section of physical memory into a section of
726  * device-virtual memory.
727  * @vm_ctx: Target VM context.
728  * @pvr_obj: Target PowerVR memory object.
729  * @pvr_obj_offset: Offset into @pvr_obj to map from.
730  * @device_addr: Virtual device address at the start of the requested mapping.
731  * @size: Size of the requested mapping.
732  *
733  * No handle is returned to represent the mapping. Instead, callers should
734  * remember @device_addr and use that as a handle.
735  *
736  * Return:
737  *  * 0 on success,
738  *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
739  *    address; the region specified by @pvr_obj_offset and @size does not fall
740  *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
741  *    is not device-virtual page-aligned,
742  *  * Any error encountered while performing internal operations required to
 *    create the mapping (returned from pvr_vm_gpuva_map or
744  *    pvr_vm_gpuva_remap).
745  */
746 int
747 pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
748 	   u64 pvr_obj_offset, u64 device_addr, u64 size)
749 {
750 	struct pvr_vm_bind_op bind_op = {0};
751 	struct drm_exec exec;
752 
753 	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
754 					  pvr_obj_offset, device_addr,
755 					  size);
756 
757 	if (err)
758 		return err;
759 
760 	drm_exec_init(&exec,
761 		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
762 
763 	pvr_gem_object_get(pvr_obj);
764 
	err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op);
	if (!err)
		err = pvr_vm_bind_op_exec(&bind_op);

	/* Drop the reservation locks even if locking or execution failed. */
	drm_exec_fini(&exec);

	pvr_vm_bind_op_fini(&bind_op);

	return err;
}
778 
779 /**
780  * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
781  * @vm_ctx: Target VM context.
782  * @device_addr: Virtual device address at the start of the target mapping.
783  * @size: Size of the target mapping.
784  *
785  * Return:
786  *  * 0 on success,
787  *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
788  *    address,
789  *  * Any error encountered while performing internal operations required to
790  *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
791  *    pvr_vm_gpuva_remap).
792  */
793 int
794 pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
795 {
796 	struct pvr_vm_bind_op bind_op = {0};
797 	struct drm_exec exec;
798 
799 	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
800 					    size);
801 
802 	if (err)
803 		return err;
804 
805 	drm_exec_init(&exec,
806 		      DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES);
807 
	err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op);
	if (!err)
		err = pvr_vm_bind_op_exec(&bind_op);

	/* Drop the reservation locks even if locking or execution failed. */
	drm_exec_fini(&exec);

	pvr_vm_bind_op_fini(&bind_op);

	return err;
}
821 
822 /* Static data areas are determined by firmware. */
823 static const struct drm_pvr_static_data_area static_data_areas[] = {
824 	{
825 		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
826 		.location_heap_id = DRM_PVR_HEAP_GENERAL,
827 		.offset = 0,
828 		.size = 128,
829 	},
830 	{
831 		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
832 		.location_heap_id = DRM_PVR_HEAP_GENERAL,
833 		.offset = 128,
834 		.size = 1024,
835 	},
836 	{
837 		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
838 		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
839 		.offset = 0,
840 		.size = 128,
841 	},
842 	{
843 		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
844 		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
845 		.offset = 128,
846 		.size = 128,
847 	},
848 	{
849 		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
850 		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
851 		.offset = 0,
852 		.size = 128,
853 	},
854 };
855 
856 #define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
857 
858 /*
859  * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
860  * static data area for each heap.
861  */
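
/*
 * Worked example (illustrative, assuming a 4K PAGE_SIZE): the last static
 * data area in the general heap is the YUV CSC area at offset 128 with size
 * 1024, so GET_RESERVED_SIZE(128, 1024) = round_up(1152, PAGE_SIZE) = 4096
 * bytes of that heap are reserved for static data.
 */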
862 static const struct drm_pvr_heap pvr_heaps[] = {
863 	[DRM_PVR_HEAP_GENERAL] = {
864 		.base = ROGUE_GENERAL_HEAP_BASE,
865 		.size = ROGUE_GENERAL_HEAP_SIZE,
866 		.flags = 0,
867 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
868 	},
869 	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
870 		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
871 		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
872 		.flags = 0,
873 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
874 	},
875 	[DRM_PVR_HEAP_USC_CODE] = {
876 		.base = ROGUE_USCCODE_HEAP_BASE,
877 		.size = ROGUE_USCCODE_HEAP_SIZE,
878 		.flags = 0,
879 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
880 	},
881 	[DRM_PVR_HEAP_RGNHDR] = {
882 		.base = ROGUE_RGNHDR_HEAP_BASE,
883 		.size = ROGUE_RGNHDR_HEAP_SIZE,
884 		.flags = 0,
885 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
886 	},
887 	[DRM_PVR_HEAP_VIS_TEST] = {
888 		.base = ROGUE_VISTEST_HEAP_BASE,
889 		.size = ROGUE_VISTEST_HEAP_SIZE,
890 		.flags = 0,
891 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
892 	},
893 	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
894 		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
895 		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
896 		.flags = 0,
897 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
898 	},
899 };
900 
901 int
902 pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
903 			  struct drm_pvr_ioctl_dev_query_args *args)
904 {
905 	struct drm_pvr_dev_query_static_data_areas query = {0};
906 	int err;
907 
908 	if (!args->pointer) {
909 		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
910 		return 0;
911 	}
912 
913 	err = PVR_UOBJ_GET(query, args->size, args->pointer);
914 	if (err < 0)
915 		return err;
916 
917 	if (!query.static_data_areas.array) {
918 		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
919 		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
920 		goto copy_out;
921 	}
922 
923 	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
924 		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
925 
926 	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
927 	if (err < 0)
928 		return err;
929 
930 copy_out:
931 	err = PVR_UOBJ_SET(args->pointer, args->size, query);
932 	if (err < 0)
933 		return err;
934 
935 	args->size = sizeof(query);
936 	return 0;
937 }
938 
939 int
940 pvr_heap_info_get(const struct pvr_device *pvr_dev,
941 		  struct drm_pvr_ioctl_dev_query_args *args)
942 {
943 	struct drm_pvr_dev_query_heap_info query = {0};
944 	u64 dest;
945 	int err;
946 
947 	if (!args->pointer) {
948 		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
949 		return 0;
950 	}
951 
952 	err = PVR_UOBJ_GET(query, args->size, args->pointer);
953 	if (err < 0)
954 		return err;
955 
956 	if (!query.heaps.array) {
957 		query.heaps.count = ARRAY_SIZE(pvr_heaps);
958 		query.heaps.stride = sizeof(struct drm_pvr_heap);
959 		goto copy_out;
960 	}
961 
962 	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
963 		query.heaps.count = ARRAY_SIZE(pvr_heaps);
964 
	/*
	 * The region header heap only exists on parts affected by BRN63142;
	 * report a zero size when the quirk is absent.
	 */
966 	dest = query.heaps.array;
967 	for (size_t i = 0; i < query.heaps.count; i++) {
968 		struct drm_pvr_heap heap = pvr_heaps[i];
969 
970 		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
971 			heap.size = 0;
972 
973 		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
974 		if (err < 0)
975 			return err;
976 
977 		dest += query.heaps.stride;
978 	}
979 
980 copy_out:
981 	err = PVR_UOBJ_SET(args->pointer, args->size, query);
982 	if (err < 0)
983 		return err;
984 
985 	args->size = sizeof(query);
986 	return 0;
987 }
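
/*
 * Illustrative sketch (not part of the driver) of the two-pass protocol
 * implemented by pvr_static_data_areas_get() and pvr_heap_info_get() above:
 * userspace first submits the query with a zero array pointer to learn the
 * element count and stride, allocates count * stride bytes, then repeats the
 * query with the array pointer filled in. The ioctl and query-type names used
 * on the userspace side below are assumptions, not part of this file.
 *
 *	struct drm_pvr_dev_query_heap_info query = { 0 };
 *
 *	args.type = DRM_PVR_DEV_QUERY_HEAP_INFO_GET;
 *	args.pointer = (uintptr_t)&query;
 *	args.size = sizeof(query);
 *	ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);    (first pass: counts only)
 *
 *	query.heaps.array = (uintptr_t)calloc(query.heaps.count, query.heaps.stride);
 *	ioctl(fd, DRM_IOCTL_PVR_DEV_QUERY, &args);    (second pass: fills the array)
 */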
988 
989 /**
990  * pvr_heap_contains_range() - Determine if a given heap contains the specified
991  *                             device-virtual address range.
992  * @pvr_heap: Target heap.
993  * @start: Inclusive start of the target range.
994  * @end: Inclusive end of the target range.
995  *
996  * It is an error to call this function with values of @start and @end that do
997  * not satisfy the condition @start <= @end.
998  */
999 static __always_inline bool
1000 pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
1001 {
1002 	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
1003 }
1004 
1005 /**
1006  * pvr_find_heap_containing() - Find a heap which contains the specified
1007  *                              device-virtual address range.
1008  * @pvr_dev: Target PowerVR device.
1009  * @start: Start of the target range.
1010  * @size: Size of the target range.
1011  *
1012  * Return:
1013  *  * A pointer to a constant instance of struct drm_pvr_heap representing the
1014  *    heap containing the entire range specified by @start and @size on
1015  *    success, or
1016  *  * %NULL if no such heap exists.
1017  */
1018 const struct drm_pvr_heap *
1019 pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
1020 {
1021 	u64 end;
1022 
1023 	if (check_add_overflow(start, size - 1, &end))
1024 		return NULL;
1025 
1026 	/*
1027 	 * There are no guarantees about the order of address ranges in
1028 	 * &pvr_heaps, so iterate over the entire array for a heap whose
1029 	 * range completely encompasses the given range.
1030 	 */
1031 	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
		/* Skip heaps only present when an associated quirk applies. */
1033 		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
1034 		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
1035 			continue;
1036 		}
1037 
1038 		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
1039 			return &pvr_heaps[heap_id];
1040 	}
1041 
1042 	return NULL;
1043 }
1044 
1045 /**
1046  * pvr_vm_find_gem_object() - Look up a buffer object from a given
1047  *                            device-virtual address.
1048  * @vm_ctx: [IN] Target VM context.
1049  * @device_addr: [IN] Virtual device address at the start of the required
1050  *               object.
1051  * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
1052  *                     of the mapped region within the buffer object. May be
1053  *                     %NULL if this information is not required.
1054  * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
1055  *                   region. May be %NULL if this information is not required.
1056  *
1057  * If successful, a reference will be taken on the buffer object. The caller
1058  * must drop the reference with pvr_gem_object_put().
1059  *
1060  * Return:
1061  *  * The PowerVR buffer object mapped at @device_addr if one exists, or
1062  *  * %NULL otherwise.
1063  */
1064 struct pvr_gem_object *
1065 pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
1066 		       u64 *mapped_offset_out, u64 *mapped_size_out)
1067 {
1068 	struct pvr_gem_object *pvr_obj;
1069 	struct drm_gpuva *va;
1070 
1071 	mutex_lock(&vm_ctx->lock);
1072 
1073 	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
1074 	if (!va)
1075 		goto err_unlock;
1076 
1077 	pvr_obj = gem_to_pvr_gem(va->gem.obj);
1078 	pvr_gem_object_get(pvr_obj);
1079 
1080 	if (mapped_offset_out)
1081 		*mapped_offset_out = va->gem.offset;
1082 	if (mapped_size_out)
1083 		*mapped_size_out = va->va.range;
1084 
1085 	mutex_unlock(&vm_ctx->lock);
1086 
1087 	return pvr_obj;
1088 
1089 err_unlock:
1090 	mutex_unlock(&vm_ctx->lock);
1091 
1092 	return NULL;
1093 }
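
/*
 * Illustrative sketch (not part of the driver): looking up the object backing
 * a known device-virtual address and dropping the reference afterwards.
 *
 *	u64 offset, size;
 *	struct pvr_gem_object *pvr_obj;
 *
 *	pvr_obj = pvr_vm_find_gem_object(vm_ctx, device_addr, &offset, &size);
 *	if (!pvr_obj)
 *		return -EINVAL;
 *
 *	... inspect the object ...
 *
 *	pvr_gem_object_put(pvr_obj);
 */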
1094 
1095 /**
 * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
1097  * @vm_ctx: Target VM context.
1098  *
1099  * Returns:
1100  *  * FW object representing firmware memory context, or
1101  *  * %NULL if this VM context does not have a firmware memory context.
1102  */
1103 struct pvr_fw_object *
1104 pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
1105 {
1106 	return vm_ctx->fw_mem_ctx_obj;
1107 }
1108