xref: /linux/drivers/gpu/drm/imagination/pvr_vm.c (revision 28f587adb69957125241a8df359b68b134f3c4a1)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_vm.h"
5 
6 #include "pvr_device.h"
7 #include "pvr_drv.h"
8 #include "pvr_gem.h"
9 #include "pvr_mmu.h"
10 #include "pvr_rogue_fwif.h"
11 #include "pvr_rogue_heap_config.h"
12 
13 #include <drm/drm_exec.h>
14 #include <drm/drm_gem.h>
15 #include <drm/drm_gpuvm.h>
16 
17 #include <linux/bug.h>
18 #include <linux/container_of.h>
19 #include <linux/err.h>
20 #include <linux/errno.h>
21 #include <linux/gfp_types.h>
22 #include <linux/kref.h>
23 #include <linux/mutex.h>
24 #include <linux/stddef.h>
25 
26 /**
27  * DOC: Memory context
28  *
29  * This is the "top level" datatype in the VM code. It's exposed in the public
30  * API as an opaque handle.
31  */
32 
33 /**
34  * struct pvr_vm_context - Context type used to represent a single VM.
35  */
36 struct pvr_vm_context {
37 	/**
38 	 * @pvr_dev: The PowerVR device to which this context is bound.
39 	 * This binding is immutable for the life of the context.
40 	 */
41 	struct pvr_device *pvr_dev;
42 
43 	/** @mmu_ctx: The context for binding to physical memory. */
44 	struct pvr_mmu_context *mmu_ctx;
45 
46 	/** @gpuvm_mgr: GPUVM object associated with this context. */
47 	struct drm_gpuvm gpuvm_mgr;
48 
49 	/** @lock: Global lock on this VM. */
50 	struct mutex lock;
51 
52 	/**
53 	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
54 	 * context.
55 	 */
56 	struct pvr_fw_object *fw_mem_ctx_obj;
57 
58 	/** @ref_count: Reference count of object. */
59 	struct kref ref_count;
60 
61 	/**
62 	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
63 	 * should use the @dummy_gem.resv and not their own _resv field.
64 	 */
65 	struct drm_gem_object dummy_gem;
66 };
67 
68 static inline
69 struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
70 {
71 	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
72 }
73 
74 struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
75 {
76 	if (vm_ctx)
77 		kref_get(&vm_ctx->ref_count);
78 
79 	return vm_ctx;
80 }
81 
82 /**
83  * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
84  *                                     page table structure behind a VM context.
85  * @vm_ctx: Target VM context.
86  */
87 dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
88 {
89 	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
90 }
91 
92 /**
93  * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
94  * @vm_ctx: Target VM context.
95  *
96  * This is used to allow private BOs to share a dma_resv for faster fence
97  * updates.
98  *
99  * Returns: The dma_resv pointer.
100  */
101 struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
102 {
103 	return vm_ctx->dummy_gem.resv;
104 }
105 
106 /**
107  * DOC: Memory mappings
108  */
109 
110 /**
111  * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
112  */
113 struct pvr_vm_gpuva {
114 	/** @base: The wrapped drm_gpuva object. */
115 	struct drm_gpuva base;
116 };
117 
118 #define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)
119 
120 enum pvr_vm_bind_type {
121 	PVR_VM_BIND_TYPE_MAP,
122 	PVR_VM_BIND_TYPE_UNMAP,
123 };
124 
125 /**
126  * struct pvr_vm_bind_op - Context of a map/unmap operation.
127  */
128 struct pvr_vm_bind_op {
129 	/** @type: Map or unmap. */
130 	enum pvr_vm_bind_type type;
131 
132 	/** @pvr_obj: Object associated with mapping (map only). */
133 	struct pvr_gem_object *pvr_obj;
134 
135 	/**
136 	 * @vm_ctx: VM context where the mapping will be created or destroyed.
137 	 */
138 	struct pvr_vm_context *vm_ctx;
139 
140 	/** @mmu_op_ctx: MMU op context. */
141 	struct pvr_mmu_op_context *mmu_op_ctx;
142 
143 	/** @gpuvm_bo: Preallocated wrapped BO for attaching to the gpuvm. */
144 	struct drm_gpuvm_bo *gpuvm_bo;
145 
146 	/**
147 	 * @new_va: Preallocated VA mapping object (initialised in callback).
148 	 * Used when creating a mapping.
149 	 */
150 	struct pvr_vm_gpuva *new_va;
151 
152 	/**
153 	 * @prev_va: Preallocated VA mapping object (initialised in callback).
154 	 * Used when a mapping or unmapping operation overlaps an existing
155 	 * mapping and splits away the beginning into a new mapping.
156 	 */
157 	struct pvr_vm_gpuva *prev_va;
158 
159 	/**
160 	 * @next_va: Preallocated VA mapping object (initialised in callback).
161 	 * Used when a mapping or unmapping operation overlaps an existing
162 	 * mapping and splits away the end into a new mapping.
163 	 */
164 	struct pvr_vm_gpuva *next_va;
165 
166 	/** @offset: Offset into @pvr_obj to begin mapping from. */
167 	u64 offset;
168 
169 	/** @device_addr: Device-virtual address at the start of the mapping. */
170 	u64 device_addr;
171 
172 	/** @size: Size of the desired mapping. */
173 	u64 size;
174 };
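/*
 * Editorial sketch (not part of the original source): a bind op is only ever
 * driven through the fixed sequence used by pvr_vm_map() and
 * pvr_vm_unmap_obj_locked() later in this file, where vm_exec.extra.priv
 * points at the bind op so pvr_vm_lock_extra() can find it. Roughly:
 *
 *	struct pvr_vm_bind_op bind_op = {0};
 *	int err;
 *
 *	err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
 *				      offset, device_addr, size);
 *	if (err)
 *		return err;
 *
 *	err = drm_gpuvm_exec_lock(&vm_exec);
 *	if (!err) {
 *		err = pvr_vm_bind_op_exec(&bind_op);
 *		drm_gpuvm_exec_unlock(&vm_exec);
 *	}
 *
 *	pvr_vm_bind_op_fini(&bind_op);
 */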
175 
176 /**
177  * pvr_vm_bind_op_exec() - Execute a single bind op.
178  * @bind_op: Bind op context.
179  *
180  * Returns:
181  *  * 0 on success,
182  *  * Any error code returned by drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap(), or
183  *    a callback function.
184  */
185 static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
186 {
187 	switch (bind_op->type) {
188 	case PVR_VM_BIND_TYPE_MAP:
189 		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
190 					bind_op, bind_op->device_addr,
191 					bind_op->size,
192 					gem_from_pvr_gem(bind_op->pvr_obj),
193 					bind_op->offset);
194 
195 	case PVR_VM_BIND_TYPE_UNMAP:
196 		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
197 					  bind_op, bind_op->device_addr,
198 					  bind_op->size);
199 	}
200 
201 	/*
202 	 * This shouldn't happen unless something went wrong
203 	 * in drm_sched.
204 	 */
205 	WARN_ON(1);
206 	return -EINVAL;
207 }
208 
209 static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
210 {
211 	drm_gpuvm_bo_put(bind_op->gpuvm_bo);
212 
213 	kfree(bind_op->new_va);
214 	kfree(bind_op->prev_va);
215 	kfree(bind_op->next_va);
216 
217 	if (bind_op->pvr_obj)
218 		pvr_gem_object_put(bind_op->pvr_obj);
219 
220 	if (bind_op->mmu_op_ctx)
221 		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
222 }
223 
224 static int
225 pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
226 			struct pvr_vm_context *vm_ctx,
227 			struct pvr_gem_object *pvr_obj, u64 offset,
228 			u64 device_addr, u64 size)
229 {
230 	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
231 	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
232 	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
233 	struct sg_table *sgt;
234 	u64 offset_plus_size;
235 	int err;
236 
237 	if (check_add_overflow(offset, size, &offset_plus_size))
238 		return -EINVAL;
239 
240 	if (is_user &&
241 	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
242 		return -EINVAL;
243 	}
244 
245 	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
246 	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
247 	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
248 		return -EINVAL;
249 
250 	bind_op->type = PVR_VM_BIND_TYPE_MAP;
251 
252 	dma_resv_lock(obj->resv, NULL);
253 	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
254 	dma_resv_unlock(obj->resv);
255 	if (IS_ERR(bind_op->gpuvm_bo))
256 		return PTR_ERR(bind_op->gpuvm_bo);
257 
258 	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
259 	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
260 	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
261 	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
262 		err = -ENOMEM;
263 		goto err_bind_op_fini;
264 	}
265 
266 	/* Pin pages so they're ready for use. */
267 	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
268 	err = PTR_ERR_OR_ZERO(sgt);
269 	if (err)
270 		goto err_bind_op_fini;
271 
272 	bind_op->mmu_op_ctx =
273 		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
274 	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
275 	if (err) {
276 		bind_op->mmu_op_ctx = NULL;
277 		goto err_bind_op_fini;
278 	}
279 
280 	bind_op->pvr_obj = pvr_obj;
281 	bind_op->vm_ctx = vm_ctx;
282 	bind_op->device_addr = device_addr;
283 	bind_op->size = size;
284 	bind_op->offset = offset;
285 
286 	return 0;
287 
288 err_bind_op_fini:
289 	pvr_vm_bind_op_fini(bind_op);
290 
291 	return err;
292 }
293 
294 static int
295 pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
296 			  struct pvr_vm_context *vm_ctx,
297 			  struct pvr_gem_object *pvr_obj,
298 			  u64 device_addr, u64 size)
299 {
300 	int err;
301 
302 	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
303 		return -EINVAL;
304 
305 	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
306 
307 	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
308 	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
309 	if (!bind_op->prev_va || !bind_op->next_va) {
310 		err = -ENOMEM;
311 		goto err_bind_op_fini;
312 	}
313 
314 	bind_op->mmu_op_ctx =
315 		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
316 	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
317 	if (err) {
318 		bind_op->mmu_op_ctx = NULL;
319 		goto err_bind_op_fini;
320 	}
321 
322 	bind_op->pvr_obj = pvr_obj;
323 	bind_op->vm_ctx = vm_ctx;
324 	bind_op->device_addr = device_addr;
325 	bind_op->size = size;
326 
327 	return 0;
328 
329 err_bind_op_fini:
330 	pvr_vm_bind_op_fini(bind_op);
331 
332 	return err;
333 }
334 
335 /**
336  * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
337  * @op: gpuva op containing the map details.
338  * @op_ctx: Operation context.
339  *
340  * Context: Called by drm_gpuvm_sm_map following a successful mapping while
341  * @op_ctx.vm_ctx mutex is held.
342  *
343  * Return:
344  *  * 0 on success, or
345  *  * Any error returned by pvr_mmu_map().
346  */
347 static int
348 pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
349 {
350 	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
351 	struct pvr_vm_bind_op *ctx = op_ctx;
352 	int err;
353 
354 	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
355 		return -EINVAL;
356 
357 	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
358 			  op->map.va.addr);
359 	if (err)
360 		return err;
361 
362 	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
363 	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
364 	ctx->new_va = NULL;
365 
366 	return 0;
367 }
368 
369 /**
370  * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
371  * @op: gpuva op containing the unmap details.
372  * @op_ctx: Operation context.
373  *
374  * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
375  * @op_ctx.vm_ctx mutex is held.
376  *
377  * Return:
378  *  * 0 on success, or
379  *  * Any error returned by pvr_mmu_unmap().
380  */
381 static int
382 pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
383 {
384 	struct pvr_vm_bind_op *ctx = op_ctx;
385 
386 	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
387 				op->unmap.va->va.range);
388 
389 	if (err)
390 		return err;
391 
392 	drm_gpuva_unmap(&op->unmap);
393 	drm_gpuva_unlink(op->unmap.va);
394 	kfree(to_pvr_vm_gpuva(op->unmap.va));
395 
396 	return 0;
397 }
398 
399 /**
400  * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
401  * @op: gpuva op containing the remap details.
402  * @op_ctx: Operation context.
403  *
404  * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
405  * mapping or unmapping operation causes a region to be split. The
406  * @op_ctx.vm_ctx mutex is held.
407  *
408  * Return:
409  *  * 0 on success, or
410  *  * Any error returned by pvr_mmu_unmap().
411  */
412 static int
413 pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
414 {
415 	struct pvr_vm_bind_op *ctx = op_ctx;
416 	u64 va_start = 0, va_range = 0;
417 	int err;
418 
419 	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
420 	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
421 	if (err)
422 		return err;
423 
424 	/* No actual remap required: the page table tree depth is fixed to 3,
425 	 * and we use 4k page table entries only for now.
426 	 */
427 	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);
428 
429 	if (op->remap.prev) {
430 		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
431 		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
432 		ctx->prev_va = NULL;
433 	}
434 
435 	if (op->remap.next) {
436 		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
437 		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
438 		ctx->next_va = NULL;
439 	}
440 
441 	drm_gpuva_unlink(op->remap.unmap->va);
442 	kfree(to_pvr_vm_gpuva(op->remap.unmap->va));
443 
444 	return 0;
445 }
446 
447 /*
448  * Public API
449  *
450  * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
451  */
452 
453 /**
454  * pvr_device_addr_is_valid() - Tests whether a device-virtual address
455  *                              is valid.
456  * @device_addr: Virtual device address to test.
457  *
458  * Return:
459  *  * %true if @device_addr is within the valid range for a device page
460  *    table and is aligned to the device page size, or
461  *  * %false otherwise.
462  */
463 bool
464 pvr_device_addr_is_valid(u64 device_addr)
465 {
466 	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
467 	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
468 }
469 
470 /**
471  * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
472  * address and associated size are both valid.
473  * @vm_ctx: Target VM context.
474  * @device_addr: Virtual device address to test.
475  * @size: Size of the range based at @device_addr to test.
476  *
477  * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again on
478  * @device_addr + @size) to verify a device-virtual address range initially
479  * seems intuitive, but it produces a false-negative when the address range
480  * is right at the end of device-virtual address space.
481  *
482  * This function catches that corner case, as well as checking that
483  * @size is non-zero.
484  *
485  * Return:
486  *  * %true if @device_addr is device page aligned; @size is device page
487  *    aligned; the range specified by @device_addr and @size is within the
488  *    bounds of the device-virtual address space, and @size is non-zero, or
489  *  * %false otherwise.
490  */
491 bool
492 pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
493 				   u64 device_addr, u64 size)
494 {
495 	return pvr_device_addr_is_valid(device_addr) &&
496 	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
497 	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
498 	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
499 }
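/*
 * Worked example (editorial): a mapping of exactly one device page placed at
 * the very top of the address space has
 * device_addr + size == PVR_PAGE_TABLE_ADDR_SPACE_SIZE. That sum is one past
 * the last valid device-virtual address, so feeding it back into
 * pvr_device_addr_is_valid() would wrongly reject the range; the "<="
 * comparison above accepts it while still rejecting any range that crosses
 * the boundary.
 */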
500 
501 static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
502 {
503 	kfree(to_pvr_vm_context(gpuvm));
504 }
505 
506 static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
507 	.vm_free = pvr_gpuvm_free,
508 	.sm_step_map = pvr_vm_gpuva_map,
509 	.sm_step_remap = pvr_vm_gpuva_remap,
510 	.sm_step_unmap = pvr_vm_gpuva_unmap,
511 };
512 
513 static void
514 fw_mem_context_init(void *cpu_ptr, void *priv)
515 {
516 	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
517 	struct pvr_vm_context *vm_ctx = priv;
518 
519 	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
520 	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
521 }
522 
523 /**
524  * pvr_vm_create_context() - Create a new VM context.
525  * @pvr_dev: Target PowerVR device.
526  * @is_userspace_context: %true if this context is for userspace. This will
527  *                        create a firmware memory context for the VM context
528  *                        and disable warnings when tearing down mappings.
529  *
530  * Return:
531  *  * A handle to the newly-minted VM context on success,
532  *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
533  *    missing or has an unsupported value,
534  *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
535  *    or
536  *  * Any error encountered while setting up internal structures.
537  */
538 struct pvr_vm_context *
539 pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
540 {
541 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
542 
543 	struct pvr_vm_context *vm_ctx;
544 	u16 device_addr_bits;
545 
546 	int err;
547 
548 	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
549 				&device_addr_bits);
550 	if (err) {
551 		drm_err(drm_dev,
552 			"Failed to get device virtual address space bits\n");
553 		return ERR_PTR(err);
554 	}
555 
556 	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
557 		drm_err(drm_dev,
558 			"Device has unsupported virtual address space size\n");
559 		return ERR_PTR(-EINVAL);
560 	}
561 
562 	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
563 	if (!vm_ctx)
564 		return ERR_PTR(-ENOMEM);
565 
566 	vm_ctx->pvr_dev = pvr_dev;
567 
568 	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
569 	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
570 	if (err)
571 		goto err_free;
572 
573 	if (is_userspace_context) {
574 		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
575 					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
576 					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
577 
578 		if (err)
579 			goto err_page_table_destroy;
580 	}
581 
582 	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
583 	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
584 		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
585 		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
586 		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
587 
588 	mutex_init(&vm_ctx->lock);
589 	kref_init(&vm_ctx->ref_count);
590 
591 	return vm_ctx;
592 
593 err_page_table_destroy:
594 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
595 
596 err_free:
597 	kfree(vm_ctx);
598 
599 	return ERR_PTR(err);
600 }
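/*
 * Usage sketch (editorial, illustrative only): callers pair the constructor
 * with pvr_vm_context_put() and use the usual IS_ERR()/PTR_ERR() convention
 * for the error pointers returned above.
 *
 *	struct pvr_vm_context *vm_ctx;
 *
 *	vm_ctx = pvr_vm_create_context(pvr_dev, true);
 *	if (IS_ERR(vm_ctx))
 *		return PTR_ERR(vm_ctx);
 *
 *	... map objects into vm_ctx, hand it to jobs, etc. ...
 *
 *	pvr_vm_context_put(vm_ctx);
 */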
601 
602 /**
603  * pvr_vm_context_release() - Tear down a VM context.
604  * @ref_count: Pointer to reference counter of the VM context.
605  *
606  * This function also ensures that no mappings are left dangling by calling
607  * pvr_vm_unmap_all().
608  */
609 static void
610 pvr_vm_context_release(struct kref *ref_count)
611 {
612 	struct pvr_vm_context *vm_ctx =
613 		container_of(ref_count, struct pvr_vm_context, ref_count);
614 
615 	if (vm_ctx->fw_mem_ctx_obj)
616 		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
617 
618 	pvr_vm_unmap_all(vm_ctx);
619 
620 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
621 	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
622 	mutex_destroy(&vm_ctx->lock);
623 
624 	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
625 }
626 
627 /**
628  * pvr_vm_context_lookup() - Look up VM context from handle
629  * @pvr_file: Pointer to pvr_file structure.
630  * @handle: Object handle.
631  *
632  * Takes reference on VM context object. Call pvr_vm_context_put() to release.
633  *
634  * Returns:
635  *  * The requested object on success, or
636  *  * %NULL on failure (object does not exist in list, or is not a VM context)
637  */
638 struct pvr_vm_context *
639 pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
640 {
641 	struct pvr_vm_context *vm_ctx;
642 
643 	xa_lock(&pvr_file->vm_ctx_handles);
644 	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
645 	pvr_vm_context_get(vm_ctx);
646 	xa_unlock(&pvr_file->vm_ctx_handles);
647 
648 	return vm_ctx;
649 }
650 
651 /**
652  * pvr_vm_context_put() - Release a reference on a VM context
653  * @vm_ctx: Target VM context.
654  *
655  * Returns:
656  *  * %true if the VM context was destroyed, or
657  *  * %false if there are any references still remaining.
658  */
659 bool
660 pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
661 {
662 	if (vm_ctx)
663 		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);
664 
665 	return true;
666 }
667 
668 /**
669  * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated with the
670  * given file.
671  * @pvr_file: Pointer to pvr_file structure.
672  *
673  * Removes all VM contexts associated with @pvr_file from the file's VM context
674  * handle list and drops the initial references. Each VM context will then be
675  * destroyed once all outstanding references are dropped.
676  */
677 void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
678 {
679 	struct pvr_vm_context *vm_ctx;
680 	unsigned long handle;
681 
682 	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
683 		/* vm_ctx is not used here because that would create a race with xa_erase */
684 		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
685 	}
686 }
687 
688 static int
689 pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
690 {
691 	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
692 	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
693 
694 	/* Acquire lock on the GEM object being mapped/unmapped. */
695 	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
696 }
697 
698 /**
699  * pvr_vm_map() - Map a section of physical memory into a section of
700  * device-virtual memory.
701  * @vm_ctx: Target VM context.
702  * @pvr_obj: Target PowerVR memory object.
703  * @pvr_obj_offset: Offset into @pvr_obj to map from.
704  * @device_addr: Virtual device address at the start of the requested mapping.
705  * @size: Size of the requested mapping.
706  *
707  * No handle is returned to represent the mapping. Instead, callers should
708  * remember @device_addr and use that as a handle.
709  *
710  * Return:
711  *  * 0 on success,
712  *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
713  *    address; the region specified by @pvr_obj_offset and @size does not fall
714  *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
715  *    is not device-virtual page-aligned,
716  *  * Any error encountered while performing internal operations required to
717  *    create the mapping (returned from pvr_vm_gpuva_map() or
718  *    pvr_vm_gpuva_remap()).
719  */
720 int
721 pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
722 	   u64 pvr_obj_offset, u64 device_addr, u64 size)
723 {
724 	struct pvr_vm_bind_op bind_op = {0};
725 	struct drm_gpuvm_exec vm_exec = {
726 		.vm = &vm_ctx->gpuvm_mgr,
727 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
728 			 DRM_EXEC_IGNORE_DUPLICATES,
729 		.extra = {
730 			.fn = pvr_vm_lock_extra,
731 			.priv = &bind_op,
732 		},
733 	};
734 
735 	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
736 					  pvr_obj_offset, device_addr,
737 					  size);
738 
739 	if (err)
740 		return err;
741 
742 	pvr_gem_object_get(pvr_obj);
743 
744 	err = drm_gpuvm_exec_lock(&vm_exec);
745 	if (err)
746 		goto err_cleanup;
747 
748 	err = pvr_vm_bind_op_exec(&bind_op);
749 
750 	drm_gpuvm_exec_unlock(&vm_exec);
751 
752 err_cleanup:
753 	pvr_vm_bind_op_fini(&bind_op);
754 
755 	return err;
756 }
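/*
 * Usage sketch (editorial): since pvr_vm_map() returns no handle, a caller
 * remembers device_addr (and size) and tears the mapping down later with
 * pvr_vm_unmap() or pvr_vm_unmap_obj(); the variables below are placeholders.
 *
 *	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
 *	if (err)
 *		return err;
 *
 *	... use the GPU mapping at device_addr ...
 *
 *	err = pvr_vm_unmap(vm_ctx, device_addr, size);
 */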
757 
758 /**
759  * pvr_vm_unmap_obj_locked() - Unmap an already mapped section of device-virtual
760  * memory.
761  * @vm_ctx: Target VM context.
762  * @pvr_obj: Target PowerVR memory object.
763  * @device_addr: Virtual device address at the start of the target mapping.
764  * @size: Size of the target mapping.
765  *
766  * Return:
767  *  * 0 on success,
768  *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
769  *    address,
770  *  * Any error encountered while performing internal operations required to
771  *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
772  *    pvr_vm_gpuva_remap).
773  *
774  * The vm_ctx->lock must be held when calling this function.
775  */
776 static int
777 pvr_vm_unmap_obj_locked(struct pvr_vm_context *vm_ctx,
778 			struct pvr_gem_object *pvr_obj,
779 			u64 device_addr, u64 size)
780 {
781 	struct pvr_vm_bind_op bind_op = {0};
782 	struct drm_gpuvm_exec vm_exec = {
783 		.vm = &vm_ctx->gpuvm_mgr,
784 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
785 			 DRM_EXEC_IGNORE_DUPLICATES,
786 		.extra = {
787 			.fn = pvr_vm_lock_extra,
788 			.priv = &bind_op,
789 		},
790 	};
791 
792 	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, pvr_obj,
793 					    device_addr, size);
794 	if (err)
795 		return err;
796 
797 	pvr_gem_object_get(pvr_obj);
798 
799 	err = drm_gpuvm_exec_lock(&vm_exec);
800 	if (err)
801 		goto err_cleanup;
802 
803 	err = pvr_vm_bind_op_exec(&bind_op);
804 
805 	drm_gpuvm_exec_unlock(&vm_exec);
806 
807 err_cleanup:
808 	pvr_vm_bind_op_fini(&bind_op);
809 
810 	return err;
811 }
812 
813 /**
814  * pvr_vm_unmap_obj() - Unmap an already mapped section of device-virtual
815  * memory.
816  * @vm_ctx: Target VM context.
817  * @pvr_obj: Target PowerVR memory object.
818  * @device_addr: Virtual device address at the start of the target mapping.
819  * @size: Size of the target mapping.
820  *
821  * Return:
822  *  * 0 on success,
823  *  * Any error encountered by pvr_vm_unmap_obj_locked().
824  */
825 int
826 pvr_vm_unmap_obj(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
827 		 u64 device_addr, u64 size)
828 {
829 	int err;
830 
831 	mutex_lock(&vm_ctx->lock);
832 	err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj, device_addr, size);
833 	mutex_unlock(&vm_ctx->lock);
834 
835 	return err;
836 }
837 
838 /**
839  * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
840  * @vm_ctx: Target VM context.
841  * @device_addr: Virtual device address at the start of the target mapping.
842  * @size: Size of the target mapping.
843  *
844  * Return:
845  *  * 0 on success,
846  *  * -%ENOENT if no mapping exists at @device_addr with the given @size,
847  *  * Any error encountered by pvr_vm_unmap_obj_locked().
848  */
849 int
850 pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
851 {
852 	struct pvr_gem_object *pvr_obj;
853 	struct drm_gpuva *va;
854 	int err;
855 
856 	mutex_lock(&vm_ctx->lock);
857 
858 	va = drm_gpuva_find(&vm_ctx->gpuvm_mgr, device_addr, size);
859 	if (va) {
860 		pvr_obj = gem_to_pvr_gem(va->gem.obj);
861 		err = pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
862 					      va->va.addr, va->va.range);
863 	} else {
864 		err = -ENOENT;
865 	}
866 
867 	mutex_unlock(&vm_ctx->lock);
868 
869 	return err;
870 }
871 
872 /**
873  * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
874  * @vm_ctx: Target VM context.
875  *
876  * This function ensures that no mappings are left dangling by unmapping them
877  * all in order of ascending device-virtual address.
878  */
879 void
880 pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
881 {
882 	mutex_lock(&vm_ctx->lock);
883 
884 	for (;;) {
885 		struct pvr_gem_object *pvr_obj;
886 		struct drm_gpuva *va;
887 
888 		va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr,
889 					  vm_ctx->gpuvm_mgr.mm_start,
890 					  vm_ctx->gpuvm_mgr.mm_range);
891 		if (!va)
892 			break;
893 
894 		pvr_obj = gem_to_pvr_gem(va->gem.obj);
895 
896 		WARN_ON(pvr_vm_unmap_obj_locked(vm_ctx, pvr_obj,
897 						va->va.addr, va->va.range));
898 	}
899 
900 	mutex_unlock(&vm_ctx->lock);
901 }
902 
903 /* Static data areas are determined by firmware. */
904 static const struct drm_pvr_static_data_area static_data_areas[] = {
905 	{
906 		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
907 		.location_heap_id = DRM_PVR_HEAP_GENERAL,
908 		.offset = 0,
909 		.size = 128,
910 	},
911 	{
912 		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
913 		.location_heap_id = DRM_PVR_HEAP_GENERAL,
914 		.offset = 128,
915 		.size = 1024,
916 	},
917 	{
918 		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
919 		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
920 		.offset = 0,
921 		.size = 128,
922 	},
923 	{
924 		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
925 		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
926 		.offset = 128,
927 		.size = 128,
928 	},
929 	{
930 		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
931 		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
932 		.offset = 0,
933 		.size = 128,
934 	},
935 };
936 
937 #define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
938 
939 /*
940  * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
941  * static data area for each heap.
942  */
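/*
 * Worked example (editorial): for DRM_PVR_HEAP_GENERAL the last entry above
 * is the YUV_CSC area at offset 128 with size 1024, so the reserved region
 * is GET_RESERVED_SIZE(128, 1024) == round_up(1152, PAGE_SIZE), i.e. 4096
 * bytes on a kernel built with 4 KiB pages.
 */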
943 static const struct drm_pvr_heap pvr_heaps[] = {
944 	[DRM_PVR_HEAP_GENERAL] = {
945 		.base = ROGUE_GENERAL_HEAP_BASE,
946 		.size = ROGUE_GENERAL_HEAP_SIZE,
947 		.flags = 0,
948 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
949 	},
950 	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
951 		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
952 		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
953 		.flags = 0,
954 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
955 	},
956 	[DRM_PVR_HEAP_USC_CODE] = {
957 		.base = ROGUE_USCCODE_HEAP_BASE,
958 		.size = ROGUE_USCCODE_HEAP_SIZE,
959 		.flags = 0,
960 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
961 	},
962 	[DRM_PVR_HEAP_RGNHDR] = {
963 		.base = ROGUE_RGNHDR_HEAP_BASE,
964 		.size = ROGUE_RGNHDR_HEAP_SIZE,
965 		.flags = 0,
966 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
967 	},
968 	[DRM_PVR_HEAP_VIS_TEST] = {
969 		.base = ROGUE_VISTEST_HEAP_BASE,
970 		.size = ROGUE_VISTEST_HEAP_SIZE,
971 		.flags = 0,
972 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
973 	},
974 	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
975 		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
976 		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
977 		.flags = 0,
978 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
979 	},
980 };
981 
982 int
983 pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
984 			  struct drm_pvr_ioctl_dev_query_args *args)
985 {
986 	struct drm_pvr_dev_query_static_data_areas query = {0};
987 	int err;
988 
989 	if (!args->pointer) {
990 		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
991 		return 0;
992 	}
993 
994 	err = PVR_UOBJ_GET(query, args->size, args->pointer);
995 	if (err < 0)
996 		return err;
997 
998 	if (!query.static_data_areas.array) {
999 		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
1000 		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
1001 		goto copy_out;
1002 	}
1003 
1004 	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
1005 		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
1006 
1007 	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
1008 	if (err < 0)
1009 		return err;
1010 
1011 copy_out:
1012 	err = PVR_UOBJ_SET(args->pointer, args->size, query);
1013 	if (err < 0)
1014 		return err;
1015 
1016 	args->size = sizeof(query);
1017 	return 0;
1018 }
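/*
 * Editorial note on the caller-visible protocol implemented above, using only
 * the fields handled in this function:
 *
 *	1. args->pointer == 0: only args->size is written back, telling the
 *	   caller how large struct drm_pvr_dev_query_static_data_areas is.
 *	2. query.static_data_areas.array == 0: count and stride are returned
 *	   so the caller can size its array.
 *	3. Otherwise: up to ARRAY_SIZE(static_data_areas) entries are copied
 *	   out, and the count is clamped accordingly.
 */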
1019 
1020 int
1021 pvr_heap_info_get(const struct pvr_device *pvr_dev,
1022 		  struct drm_pvr_ioctl_dev_query_args *args)
1023 {
1024 	struct drm_pvr_dev_query_heap_info query = {0};
1025 	u64 dest;
1026 	int err;
1027 
1028 	if (!args->pointer) {
1029 		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
1030 		return 0;
1031 	}
1032 
1033 	err = PVR_UOBJ_GET(query, args->size, args->pointer);
1034 	if (err < 0)
1035 		return err;
1036 
1037 	if (!query.heaps.array) {
1038 		query.heaps.count = ARRAY_SIZE(pvr_heaps);
1039 		query.heaps.stride = sizeof(struct drm_pvr_heap);
1040 		goto copy_out;
1041 	}
1042 
1043 	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
1044 		query.heaps.count = ARRAY_SIZE(pvr_heaps);
1045 
1046 	/* Region header heap is only present if BRN63142 is present. */
1047 	dest = query.heaps.array;
1048 	for (size_t i = 0; i < query.heaps.count; i++) {
1049 		struct drm_pvr_heap heap = pvr_heaps[i];
1050 
1051 		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
1052 			heap.size = 0;
1053 
1054 		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
1055 		if (err < 0)
1056 			return err;
1057 
1058 		dest += query.heaps.stride;
1059 	}
1060 
1061 copy_out:
1062 	err = PVR_UOBJ_SET(args->pointer, args->size, query);
1063 	if (err < 0)
1064 		return err;
1065 
1066 	args->size = sizeof(query);
1067 	return 0;
1068 }
1069 
1070 /**
1071  * pvr_heap_contains_range() - Determine if a given heap contains the specified
1072  *                             device-virtual address range.
1073  * @pvr_heap: Target heap.
1074  * @start: Inclusive start of the target range.
1075  * @end: Inclusive end of the target range.
1076  *
1077  * It is an error to call this function with values of @start and @end that do
1078  * not satisfy the condition @start <= @end.
1079  */
1080 static __always_inline bool
1081 pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
1082 {
1083 	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
1084 }
1085 
1086 /**
1087  * pvr_find_heap_containing() - Find a heap which contains the specified
1088  *                              device-virtual address range.
1089  * @pvr_dev: Target PowerVR device.
1090  * @start: Start of the target range.
1091  * @size: Size of the target range.
1092  *
1093  * Return:
1094  *  * A pointer to a constant instance of struct drm_pvr_heap representing the
1095  *    heap containing the entire range specified by @start and @size on
1096  *    success, or
1097  *  * %NULL if no such heap exists.
1098  */
1099 const struct drm_pvr_heap *
1100 pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
1101 {
1102 	u64 end;
1103 
1104 	if (check_add_overflow(start, size - 1, &end))
1105 		return NULL;
1106 
1107 	/*
1108 	 * There are no guarantees about the order of address ranges in
1109 	 * &pvr_heaps, so iterate over the entire array for a heap whose
1110 	 * range completely encompasses the given range.
1111 	 */
1112 	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
1113 		/* Filter out heaps that are only present with an associated quirk. */
1114 		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
1115 		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
1116 			continue;
1117 		}
1118 
1119 		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
1120 			return &pvr_heaps[heap_id];
1121 	}
1122 
1123 	return NULL;
1124 }
1125 
1126 /**
1127  * pvr_vm_find_gem_object() - Look up a buffer object from a given
1128  *                            device-virtual address.
1129  * @vm_ctx: [IN] Target VM context.
1130  * @device_addr: [IN] Virtual device address at the start of the required
1131  *               object.
1132  * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
1133  *                     of the mapped region within the buffer object. May be
1134  *                     %NULL if this information is not required.
1135  * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
1136  *                   region. May be %NULL if this information is not required.
1137  *
1138  * If successful, a reference will be taken on the buffer object. The caller
1139  * must drop the reference with pvr_gem_object_put().
1140  *
1141  * Return:
1142  *  * The PowerVR buffer object mapped at @device_addr if one exists, or
1143  *  * %NULL otherwise.
1144  */
1145 struct pvr_gem_object *
1146 pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
1147 		       u64 *mapped_offset_out, u64 *mapped_size_out)
1148 {
1149 	struct pvr_gem_object *pvr_obj;
1150 	struct drm_gpuva *va;
1151 
1152 	mutex_lock(&vm_ctx->lock);
1153 
1154 	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
1155 	if (!va)
1156 		goto err_unlock;
1157 
1158 	pvr_obj = gem_to_pvr_gem(va->gem.obj);
1159 	pvr_gem_object_get(pvr_obj);
1160 
1161 	if (mapped_offset_out)
1162 		*mapped_offset_out = va->gem.offset;
1163 	if (mapped_size_out)
1164 		*mapped_size_out = va->va.range;
1165 
1166 	mutex_unlock(&vm_ctx->lock);
1167 
1168 	return pvr_obj;
1169 
1170 err_unlock:
1171 	mutex_unlock(&vm_ctx->lock);
1172 
1173 	return NULL;
1174 }
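/*
 * Usage sketch (editorial): the reference returned by the lookup must always
 * be dropped by the caller, e.g.:
 *
 *	struct pvr_gem_object *pvr_obj;
 *	u64 mapped_size;
 *
 *	pvr_obj = pvr_vm_find_gem_object(vm_ctx, device_addr, NULL,
 *					 &mapped_size);
 *	if (!pvr_obj)
 *		return -ENOENT;
 *
 *	... use pvr_obj and mapped_size ...
 *
 *	pvr_gem_object_put(pvr_obj);
 */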
1175 
1176 /**
1177  * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
1178  * @vm_ctx: Target VM context.
1179  *
1180  * Returns:
1181  *  * FW object representing firmware memory context, or
1182  *  * %NULL if this VM context does not have a firmware memory context.
1183  */
1184 struct pvr_fw_object *
1185 pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
1186 {
1187 	return vm_ctx->fw_mem_ctx_obj;
1188 }
1189