xref: /linux/drivers/gpu/drm/imagination/pvr_vm.c (revision 1a371190a375f98c9b106f758ea41558c3f92556)
1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /* Copyright (c) 2023 Imagination Technologies Ltd. */
3 
4 #include "pvr_vm.h"
5 
6 #include "pvr_device.h"
7 #include "pvr_drv.h"
8 #include "pvr_gem.h"
9 #include "pvr_mmu.h"
10 #include "pvr_rogue_fwif.h"
11 #include "pvr_rogue_heap_config.h"
12 
13 #include <drm/drm_exec.h>
14 #include <drm/drm_gem.h>
15 #include <drm/drm_gpuvm.h>
16 
17 #include <linux/container_of.h>
18 #include <linux/err.h>
19 #include <linux/errno.h>
20 #include <linux/gfp_types.h>
21 #include <linux/kref.h>
22 #include <linux/mutex.h>
23 #include <linux/stddef.h>
24 
25 /**
26  * DOC: Memory context
27  *
28  * This is the "top level" datatype in the VM code. It's exposed in the public
29  * API as an opaque handle.
30  */
31 
32 /**
33  * struct pvr_vm_context - Context type used to represent a single VM.
34  */
35 struct pvr_vm_context {
36 	/**
37 	 * @pvr_dev: The PowerVR device to which this context is bound.
38 	 * This binding is immutable for the life of the context.
39 	 */
40 	struct pvr_device *pvr_dev;
41 
42 	/** @mmu_ctx: The context for binding to physical memory. */
43 	struct pvr_mmu_context *mmu_ctx;
44 
45 	/** @gpuvm_mgr: GPUVM object associated with this context. */
46 	struct drm_gpuvm gpuvm_mgr;
47 
48 	/** @lock: Global lock on this VM. */
49 	struct mutex lock;
50 
51 	/**
52 	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
53 	 * context.
54 	 */
55 	struct pvr_fw_object *fw_mem_ctx_obj;
56 
57 	/** @ref_count: Reference count of object. */
58 	struct kref ref_count;
59 
60 	/**
61 	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
62 	 * should use the @dummy_gem.resv and not their own _resv field.
63 	 */
64 	struct drm_gem_object dummy_gem;
65 };
66 
67 static inline
68 struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
69 {
70 	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
71 }
72 
73 struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
74 {
75 	if (vm_ctx)
76 		kref_get(&vm_ctx->ref_count);
77 
78 	return vm_ctx;
79 }
80 
81 /**
82  * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
83  *                                     page table structure behind a VM context.
84  * @vm_ctx: Target VM context.
85  */
86 dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
87 {
88 	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
89 }
90 
91 /**
92  * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
93  * @vm_ctx: Target VM context.
94  *
95  * This is used to allow private BOs to share a dma_resv for faster fence
96  * updates.
97  *
98  * Returns: The dma_resv pointer.
99  */
100 struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
101 {
102 	return vm_ctx->dummy_gem.resv;
103 }
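
/*
 * Editor's sketch (illustrative, not part of this driver): the @dummy_gem
 * comment above relies on the DRM convention that a GEM object whose ->resv
 * pointer is already set before initialisation keeps that pointer, with
 * drm_gem_private_object_init() only falling back to the embedded _resv when
 * ->resv is still NULL. A hypothetical BO-creation path could therefore
 * share the VM's reservation object as below; the driver's real BO setup
 * lives in pvr_gem.c.
 */
static void __maybe_unused
pvr_vm_example_share_resv(struct pvr_vm_context *vm_ctx,
			  struct drm_gem_object *obj)
{
	/* Must be assigned before the GEM object is initialised. */
	obj->resv = pvr_vm_get_dma_resv(vm_ctx);
}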
104 
105 /**
106  * DOC: Memory mappings
107  */
108 
109 /**
110  * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
111  */
112 struct pvr_vm_gpuva {
113 	/** @base: The wrapped drm_gpuva object. */
114 	struct drm_gpuva base;
115 };
116 
117 #define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)
118 
119 enum pvr_vm_bind_type {
120 	PVR_VM_BIND_TYPE_MAP,
121 	PVR_VM_BIND_TYPE_UNMAP,
122 };
123 
124 /**
125  * struct pvr_vm_bind_op - Context of a map/unmap operation.
126  */
127 struct pvr_vm_bind_op {
128 	/** @type: Map or unmap. */
129 	enum pvr_vm_bind_type type;
130 
131 	/** @pvr_obj: Object associated with mapping (map only). */
132 	struct pvr_gem_object *pvr_obj;
133 
134 	/**
135 	 * @vm_ctx: VM context where the mapping will be created or destroyed.
136 	 */
137 	struct pvr_vm_context *vm_ctx;
138 
139 	/** @mmu_op_ctx: MMU op context. */
140 	struct pvr_mmu_op_context *mmu_op_ctx;
141 
142 	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
143 	struct drm_gpuvm_bo *gpuvm_bo;
144 
145 	/**
146 	 * @new_va: Prealloced VA mapping object (init in callback).
147 	 * Used when creating a mapping.
148 	 */
149 	struct pvr_vm_gpuva *new_va;
150 
151 	/**
152 	 * @prev_va: Prealloced VA mapping object (init in callback).
153 	 * Used when a mapping or unmapping operation overlaps an existing
154 	 * mapping and splits away the beginning into a new mapping.
155 	 */
156 	struct pvr_vm_gpuva *prev_va;
157 
158 	/**
159 	 * @next_va: Prealloced VA mapping object (init in callback).
160 	 * Used when a mapping or unmapping operation overlaps an existing
161 	 * mapping and splits away the end into a new mapping.
162 	 */
163 	struct pvr_vm_gpuva *next_va;
164 
165 	/** @offset: Offset into @pvr_obj to begin mapping from. */
166 	u64 offset;
167 
168 	/** @device_addr: Device-virtual address at the start of the mapping. */
169 	u64 device_addr;
170 
171 	/** @size: Size of the desired mapping. */
172 	u64 size;
173 };
174 
175 /**
176  * pvr_vm_bind_op_exec() - Execute a single bind op.
177  * @bind_op: Bind op context.
178  *
179  * Returns:
180  *  * 0 on success,
181  *  * Any error code returned by drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap(), or
182  *    a callback function.
183  */
184 static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
185 {
186 	switch (bind_op->type) {
187 	case PVR_VM_BIND_TYPE_MAP:
188 		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
189 					bind_op, bind_op->device_addr,
190 					bind_op->size,
191 					gem_from_pvr_gem(bind_op->pvr_obj),
192 					bind_op->offset);
193 
194 	case PVR_VM_BIND_TYPE_UNMAP:
195 		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
196 					  bind_op, bind_op->device_addr,
197 					  bind_op->size);
198 	}
199 
200 	/*
201 	 * This shouldn't happen unless something went wrong
202 	 * in drm_sched.
203 	 */
204 	WARN_ON(1);
205 	return -EINVAL;
206 }
207 
208 static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
209 {
210 	drm_gpuvm_bo_put(bind_op->gpuvm_bo);
211 
212 	kfree(bind_op->new_va);
213 	kfree(bind_op->prev_va);
214 	kfree(bind_op->next_va);
215 
216 	if (bind_op->pvr_obj)
217 		pvr_gem_object_put(bind_op->pvr_obj);
218 
219 	if (bind_op->mmu_op_ctx)
220 		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
221 }
222 
223 static int
224 pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
225 			struct pvr_vm_context *vm_ctx,
226 			struct pvr_gem_object *pvr_obj, u64 offset,
227 			u64 device_addr, u64 size)
228 {
229 	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
230 	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
231 	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
232 	struct sg_table *sgt;
233 	u64 offset_plus_size;
234 	int err;
235 
236 	if (check_add_overflow(offset, size, &offset_plus_size))
237 		return -EINVAL;
238 
239 	if (is_user &&
240 	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
241 		return -EINVAL;
242 	}
243 
244 	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
245 	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
246 	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
247 		return -EINVAL;
248 
249 	bind_op->type = PVR_VM_BIND_TYPE_MAP;
250 
251 	dma_resv_lock(obj->resv, NULL);
252 	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
253 	dma_resv_unlock(obj->resv);
254 	if (IS_ERR(bind_op->gpuvm_bo))
255 		return PTR_ERR(bind_op->gpuvm_bo);
256 
257 	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
258 	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
259 	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
260 	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
261 		err = -ENOMEM;
262 		goto err_bind_op_fini;
263 	}
264 
265 	/* Pin pages so they're ready for use. */
266 	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
267 	err = PTR_ERR_OR_ZERO(sgt);
268 	if (err)
269 		goto err_bind_op_fini;
270 
271 	bind_op->mmu_op_ctx =
272 		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
273 	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
274 	if (err) {
275 		bind_op->mmu_op_ctx = NULL;
276 		goto err_bind_op_fini;
277 	}
278 
279 	bind_op->pvr_obj = pvr_obj;
280 	bind_op->vm_ctx = vm_ctx;
281 	bind_op->device_addr = device_addr;
282 	bind_op->size = size;
283 	bind_op->offset = offset;
284 
285 	return 0;
286 
287 err_bind_op_fini:
288 	pvr_vm_bind_op_fini(bind_op);
289 
290 	return err;
291 }
292 
293 static int
294 pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
295 			  struct pvr_vm_context *vm_ctx, u64 device_addr,
296 			  u64 size)
297 {
298 	int err;
299 
300 	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
301 		return -EINVAL;
302 
303 	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;
304 
305 	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
306 	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
307 	if (!bind_op->prev_va || !bind_op->next_va) {
308 		err = -ENOMEM;
309 		goto err_bind_op_fini;
310 	}
311 
312 	bind_op->mmu_op_ctx =
313 		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
314 	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
315 	if (err) {
316 		bind_op->mmu_op_ctx = NULL;
317 		goto err_bind_op_fini;
318 	}
319 
320 	bind_op->vm_ctx = vm_ctx;
321 	bind_op->device_addr = device_addr;
322 	bind_op->size = size;
323 
324 	return 0;
325 
326 err_bind_op_fini:
327 	pvr_vm_bind_op_fini(bind_op);
328 
329 	return err;
330 }
331 
332 /**
333  * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
334  * @op: gpuva op containing the map details.
335  * @op_ctx: Operation context.
336  *
337  * Context: Called by drm_gpuvm_sm_map following a successful mapping while
338  * @op_ctx.vm_ctx mutex is held.
339  *
340  * Return:
341  *  * 0 on success, or
342  *  * Any error returned by pvr_mmu_map().
343  */
344 static int
345 pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
346 {
347 	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
348 	struct pvr_vm_bind_op *ctx = op_ctx;
349 	int err;
350 
351 	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
352 		return -EINVAL;
353 
354 	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
355 			  op->map.va.addr);
356 	if (err)
357 		return err;
358 
359 	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
360 	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
361 	ctx->new_va = NULL;
362 
363 	return 0;
364 }
365 
366 /**
367  * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
368  * @op: gpuva op containing the unmap details.
369  * @op_ctx: Operation context.
370  *
371  * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
372  * @op_ctx.vm_ctx mutex is held.
373  *
374  * Return:
375  *  * 0 on success, or
376  *  * Any error returned by pvr_mmu_unmap().
377  */
378 static int
379 pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
380 {
381 	struct pvr_vm_bind_op *ctx = op_ctx;
382 
383 	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
384 				op->unmap.va->va.range);
385 
386 	if (err)
387 		return err;
388 
389 	drm_gpuva_unmap(&op->unmap);
390 	drm_gpuva_unlink(op->unmap.va);
391 	kfree(to_pvr_vm_gpuva(op->unmap.va));
392 
393 	return 0;
394 }
395 
396 /**
397  * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
398  * @op: gpuva op containing the remap details.
399  * @op_ctx: Operation context.
400  *
401  * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
402  * mapping or unmapping operation causes a region to be split. The
403  * @op_ctx.vm_ctx mutex is held.
404  *
405  * Return:
406  *  * 0 on success, or
407  *  * Any error returned by pvr_mmu_unmap().
408  */
409 static int
410 pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
411 {
412 	struct pvr_vm_bind_op *ctx = op_ctx;
413 	u64 va_start = 0, va_range = 0;
414 	int err;
415 
416 	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
417 	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
418 	if (err)
419 		return err;
420 
421 	/* No actual remap required: the page table tree depth is fixed to 3,
422 	 * and we use 4k page table entries only for now.
423 	 */
424 	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);
425 
426 	if (op->remap.prev) {
427 		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
428 		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
429 		ctx->prev_va = NULL;
430 	}
431 
432 	if (op->remap.next) {
433 		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
434 		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
435 		ctx->next_va = NULL;
436 	}
437 
438 	drm_gpuva_unlink(op->remap.unmap->va);
439 	kfree(to_pvr_vm_gpuva(op->remap.unmap->va));
440 
441 	return 0;
442 }
443 
444 /*
445  * Public API
446  *
447  * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
448  */
449 
450 /**
451  * pvr_device_addr_is_valid() - Tests whether a device-virtual address
452  *                              is valid.
453  * @device_addr: Virtual device address to test.
454  *
455  * Return:
456  *  * %true if @device_addr is within the valid range for a device page
457  *    table and is aligned to the device page size, or
458  *  * %false otherwise.
459  */
460 bool
461 pvr_device_addr_is_valid(u64 device_addr)
462 {
463 	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
464 	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
465 }
466 
467 /**
468  * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
469  * address and associated size are both valid.
470  * @vm_ctx: Target VM context.
471  * @device_addr: Virtual device address to test.
472  * @size: Size of the range based at @device_addr to test.
473  *
474  * Calling pvr_device_addr_is_valid() twice (once on @size, and again on
475  * @device_addr + @size) to verify a device-virtual address range initially
476  * seems intuitive, but it produces a false-negative when the address range
477  * is right at the end of device-virtual address space.
478  *
479  * This function catches that corner case, as well as checking that
480  * @size is non-zero.
481  *
482  * Return:
483  *  * %true if @device_addr is device page aligned; @size is device page
484  *    aligned; the range specified by @device_addr and @size is within the
485  *    bounds of the device-virtual address space, and @size is non-zero, or
486  *  * %false otherwise.
487  */
488 bool
489 pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
490 				   u64 device_addr, u64 size)
491 {
492 	return pvr_device_addr_is_valid(device_addr) &&
493 	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
494 	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
495 	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
496 }
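
/*
 * Worked example (editor's note, assuming a 40-bit device-virtual address
 * space and 4 KiB device pages): mapping the final device page uses
 * device_addr = (1ULL << 40) - SZ_4K and size = SZ_4K. The range is valid,
 * but device_addr + size == 1ULL << 40 sets a bit outside
 * PVR_PAGE_TABLE_ADDR_MASK, so a naive pvr_device_addr_is_valid(device_addr
 * + size) check would wrongly reject it. The "<=" comparison against
 * PVR_PAGE_TABLE_ADDR_SPACE_SIZE above accepts the range instead.
 */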
497 
498 static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
499 {
500 	kfree(to_pvr_vm_context(gpuvm));
501 }
502 
503 static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
504 	.vm_free = pvr_gpuvm_free,
505 	.sm_step_map = pvr_vm_gpuva_map,
506 	.sm_step_remap = pvr_vm_gpuva_remap,
507 	.sm_step_unmap = pvr_vm_gpuva_unmap,
508 };
509 
510 static void
511 fw_mem_context_init(void *cpu_ptr, void *priv)
512 {
513 	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
514 	struct pvr_vm_context *vm_ctx = priv;
515 
516 	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
517 	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
518 }
519 
520 /**
521  * pvr_vm_create_context() - Create a new VM context.
522  * @pvr_dev: Target PowerVR device.
523  * @is_userspace_context: %true if this context is for userspace. This will
524  *                        create a firmware memory context for the VM context
525  *                        and disable warnings when tearing down mappings.
526  *
527  * Return:
528  *  * A handle to the newly-minted VM context on success,
529  *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
530  *    missing or has an unsupported value,
531  *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
532  *    or
533  *  * Any error encountered while setting up internal structures.
534  */
535 struct pvr_vm_context *
536 pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
537 {
538 	struct drm_device *drm_dev = from_pvr_device(pvr_dev);
539 
540 	struct pvr_vm_context *vm_ctx;
541 	u16 device_addr_bits;
542 
543 	int err;
544 
545 	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
546 				&device_addr_bits);
547 	if (err) {
548 		drm_err(drm_dev,
549 			"Failed to get device virtual address space bits\n");
550 		return ERR_PTR(err);
551 	}
552 
553 	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
554 		drm_err(drm_dev,
555 			"Device has unsupported virtual address space size\n");
556 		return ERR_PTR(-EINVAL);
557 	}
558 
559 	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
560 	if (!vm_ctx)
561 		return ERR_PTR(-ENOMEM);
562 
563 	vm_ctx->pvr_dev = pvr_dev;
564 
565 	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
566 	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
567 	if (err)
568 		goto err_free;
569 
570 	if (is_userspace_context) {
571 		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
572 					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
573 					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);
574 
575 		if (err)
576 			goto err_page_table_destroy;
577 	}
578 
579 	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
580 	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
581 		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
582 		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
583 		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);
584 
585 	mutex_init(&vm_ctx->lock);
586 	kref_init(&vm_ctx->ref_count);
587 
588 	return vm_ctx;
589 
590 err_page_table_destroy:
591 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
592 
593 err_free:
594 	kfree(vm_ctx);
595 
596 	return ERR_PTR(err);
597 }
598 
599 /**
600  * pvr_vm_context_release() - Teardown a VM context.
601  * @ref_count: Pointer to reference counter of the VM context.
602  *
603  * This function ensures that no mappings are left dangling by unmapping them
604  * all in order of ascending device-virtual address.
605  */
606 static void
607 pvr_vm_context_release(struct kref *ref_count)
608 {
609 	struct pvr_vm_context *vm_ctx =
610 		container_of(ref_count, struct pvr_vm_context, ref_count);
611 
612 	if (vm_ctx->fw_mem_ctx_obj)
613 		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);
614 
615 	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
616 			     vm_ctx->gpuvm_mgr.mm_range));
617 
618 	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
619 	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
620 	mutex_destroy(&vm_ctx->lock);
621 
622 	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
623 }
624 
625 /**
626  * pvr_vm_context_lookup() - Look up VM context from handle
627  * @pvr_file: Pointer to pvr_file structure.
628  * @handle: Object handle.
629  *
630  * Takes reference on VM context object. Call pvr_vm_context_put() to release.
631  *
632  * Returns:
633  *  * The requested object on success, or
634  *  * %NULL on failure (object does not exist in list, or is not a VM context)
635  */
636 struct pvr_vm_context *
637 pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
638 {
639 	struct pvr_vm_context *vm_ctx;
640 
641 	xa_lock(&pvr_file->vm_ctx_handles);
642 	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
643 	if (vm_ctx)
644 		kref_get(&vm_ctx->ref_count);
645 
646 	xa_unlock(&pvr_file->vm_ctx_handles);
647 
648 	return vm_ctx;
649 }
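
/*
 * Usage sketch (editor's illustration, not driver code): a hypothetical
 * caller resolving a userspace-supplied handle, using the context, then
 * dropping the reference taken by the lookup.
 */
static int __maybe_unused
pvr_vm_context_lookup_example(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_vm_context *vm_ctx = pvr_vm_context_lookup(pvr_file, handle);

	if (!vm_ctx)
		return -EINVAL;

	/* ... operate on vm_ctx while the reference is held ... */

	pvr_vm_context_put(vm_ctx);

	return 0;
}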
650 
651 /**
652  * pvr_vm_context_put() - Release a reference on a VM context
653  * @vm_ctx: Target VM context.
654  *
655  * Returns:
656  *  * %true if the VM context was destroyed, or
657  *  * %false if there are any references still remaining.
658  */
659 bool
660 pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
661 {
662 	if (vm_ctx)
663 		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);
664 
665 	return true;
666 }
667 
668 /**
669  * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated with the
670  * given file.
671  * @pvr_file: Pointer to pvr_file structure.
672  *
673  * Removes all vm_contexts associated with @pvr_file from the device VM context
674  * list and drops initial references. vm_contexts will then be destroyed once
675  * all outstanding references are dropped.
676  */
677 void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
678 {
679 	struct pvr_vm_context *vm_ctx;
680 	unsigned long handle;
681 
682 	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
683 		/* vm_ctx is not used here because that would create a race with xa_erase */
684 		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
685 	}
686 }
687 
688 static int
689 pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
690 {
691 	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
692 	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;
693 
694 	/* Unmap operations don't have an object to lock. */
695 	if (!pvr_obj)
696 		return 0;
697 
698 	/* Acquire lock on the GEM being mapped. */
699 	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
700 }
701 
702 /**
703  * pvr_vm_map() - Map a section of physical memory into a section of
704  * device-virtual memory.
705  * @vm_ctx: Target VM context.
706  * @pvr_obj: Target PowerVR memory object.
707  * @pvr_obj_offset: Offset into @pvr_obj to map from.
708  * @device_addr: Virtual device address at the start of the requested mapping.
709  * @size: Size of the requested mapping.
710  *
711  * No handle is returned to represent the mapping. Instead, callers should
712  * remember @device_addr and use that as a handle.
713  *
714  * Return:
715  *  * 0 on success,
716  *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
717  *    address; the region specified by @pvr_obj_offset and @size does not fall
718  *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
719  *    is not device-virtual page-aligned,
720  *  * Any error encountered while performing internal operations required to
721  *    create the mapping (returned from pvr_vm_gpuva_map or
722  *    pvr_vm_gpuva_remap).
723  */
724 int
725 pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
726 	   u64 pvr_obj_offset, u64 device_addr, u64 size)
727 {
728 	struct pvr_vm_bind_op bind_op = {0};
729 	struct drm_gpuvm_exec vm_exec = {
730 		.vm = &vm_ctx->gpuvm_mgr,
731 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
732 			 DRM_EXEC_IGNORE_DUPLICATES,
733 		.extra = {
734 			.fn = pvr_vm_lock_extra,
735 			.priv = &bind_op,
736 		},
737 	};
738 
739 	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
740 					  pvr_obj_offset, device_addr,
741 					  size);
742 
743 	if (err)
744 		return err;
745 
746 	pvr_gem_object_get(pvr_obj);
747 
748 	err = drm_gpuvm_exec_lock(&vm_exec);
749 	if (err)
750 		goto err_cleanup;
751 
752 	err = pvr_vm_bind_op_exec(&bind_op);
753 
754 	drm_gpuvm_exec_unlock(&vm_exec);
755 
756 err_cleanup:
757 	pvr_vm_bind_op_fini(&bind_op);
758 
759 	return err;
760 }
761 
762 /**
763  * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
764  * @vm_ctx: Target VM context.
765  * @device_addr: Virtual device address at the start of the target mapping.
766  * @size: Size of the target mapping.
767  *
768  * Return:
769  *  * 0 on success,
770  *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
771  *    address,
772  *  * Any error encountered while performing internal operations required to
773  *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
774  *    pvr_vm_gpuva_remap).
775  */
776 int
777 pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
778 {
779 	struct pvr_vm_bind_op bind_op = {0};
780 	struct drm_gpuvm_exec vm_exec = {
781 		.vm = &vm_ctx->gpuvm_mgr,
782 		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
783 			 DRM_EXEC_IGNORE_DUPLICATES,
784 		.extra = {
785 			.fn = pvr_vm_lock_extra,
786 			.priv = &bind_op,
787 		},
788 	};
789 
790 	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
791 					    size);
792 	if (err)
793 		return err;
794 
795 	err = drm_gpuvm_exec_lock(&vm_exec);
796 	if (err)
797 		goto err_cleanup;
798 
799 	err = pvr_vm_bind_op_exec(&bind_op);
800 
801 	drm_gpuvm_exec_unlock(&vm_exec);
802 
803 err_cleanup:
804 	pvr_vm_bind_op_fini(&bind_op);
805 
806 	return err;
807 }
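
/*
 * Usage sketch (editor's illustration, not driver code): a hypothetical
 * caller mapping an entire BO at a chosen device-virtual address and later
 * tearing the mapping down again. As noted above, no handle is returned;
 * the device-virtual address identifies the mapping.
 */
static int __maybe_unused
pvr_vm_map_example(struct pvr_vm_context *vm_ctx,
		   struct pvr_gem_object *pvr_obj, u64 device_addr)
{
	const u64 size = pvr_gem_object_size(pvr_obj);
	int err;

	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
	if (err)
		return err;

	/* ... the mapping is live; work may reference device_addr ... */

	return pvr_vm_unmap(vm_ctx, device_addr, size);
}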
808 
809 /* Static data areas are determined by firmware. */
810 static const struct drm_pvr_static_data_area static_data_areas[] = {
811 	{
812 		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
813 		.location_heap_id = DRM_PVR_HEAP_GENERAL,
814 		.offset = 0,
815 		.size = 128,
816 	},
817 	{
818 		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
819 		.location_heap_id = DRM_PVR_HEAP_GENERAL,
820 		.offset = 128,
821 		.size = 1024,
822 	},
823 	{
824 		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
825 		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
826 		.offset = 0,
827 		.size = 128,
828 	},
829 	{
830 		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
831 		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
832 		.offset = 128,
833 		.size = 128,
834 	},
835 	{
836 		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
837 		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
838 		.offset = 0,
839 		.size = 128,
840 	},
841 };
842 
843 #define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)
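
/*
 * Worked example (editor's note, assuming 4 KiB CPU pages): the general
 * heap's last static data area is the YUV_CSC entry at offset 128 with size
 * 1024, so GET_RESERVED_SIZE(128, 1024) == round_up(1152, PAGE_SIZE) == 4096,
 * i.e. one page reserved to cover that heap's static data areas.
 */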
844 
845 /*
846  * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
847  * static data area for each heap.
848  */
849 static const struct drm_pvr_heap pvr_heaps[] = {
850 	[DRM_PVR_HEAP_GENERAL] = {
851 		.base = ROGUE_GENERAL_HEAP_BASE,
852 		.size = ROGUE_GENERAL_HEAP_SIZE,
853 		.flags = 0,
854 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
855 	},
856 	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
857 		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
858 		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
859 		.flags = 0,
860 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
861 	},
862 	[DRM_PVR_HEAP_USC_CODE] = {
863 		.base = ROGUE_USCCODE_HEAP_BASE,
864 		.size = ROGUE_USCCODE_HEAP_SIZE,
865 		.flags = 0,
866 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
867 	},
868 	[DRM_PVR_HEAP_RGNHDR] = {
869 		.base = ROGUE_RGNHDR_HEAP_BASE,
870 		.size = ROGUE_RGNHDR_HEAP_SIZE,
871 		.flags = 0,
872 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
873 	},
874 	[DRM_PVR_HEAP_VIS_TEST] = {
875 		.base = ROGUE_VISTEST_HEAP_BASE,
876 		.size = ROGUE_VISTEST_HEAP_SIZE,
877 		.flags = 0,
878 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
879 	},
880 	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
881 		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
882 		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
883 		.flags = 0,
884 		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
885 	},
886 };
887 
888 int
889 pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
890 			  struct drm_pvr_ioctl_dev_query_args *args)
891 {
892 	struct drm_pvr_dev_query_static_data_areas query = {0};
893 	int err;
894 
895 	if (!args->pointer) {
896 		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
897 		return 0;
898 	}
899 
900 	err = PVR_UOBJ_GET(query, args->size, args->pointer);
901 	if (err < 0)
902 		return err;
903 
904 	if (!query.static_data_areas.array) {
905 		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
906 		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
907 		goto copy_out;
908 	}
909 
910 	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
911 		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
912 
913 	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
914 	if (err < 0)
915 		return err;
916 
917 copy_out:
918 	err = PVR_UOBJ_SET(args->pointer, args->size, query);
919 	if (err < 0)
920 		return err;
921 
922 	args->size = sizeof(query);
923 	return 0;
924 }
925 
926 int
927 pvr_heap_info_get(const struct pvr_device *pvr_dev,
928 		  struct drm_pvr_ioctl_dev_query_args *args)
929 {
930 	struct drm_pvr_dev_query_heap_info query = {0};
931 	u64 dest;
932 	int err;
933 
934 	if (!args->pointer) {
935 		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
936 		return 0;
937 	}
938 
939 	err = PVR_UOBJ_GET(query, args->size, args->pointer);
940 	if (err < 0)
941 		return err;
942 
943 	if (!query.heaps.array) {
944 		query.heaps.count = ARRAY_SIZE(pvr_heaps);
945 		query.heaps.stride = sizeof(struct drm_pvr_heap);
946 		goto copy_out;
947 	}
948 
949 	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
950 		query.heaps.count = ARRAY_SIZE(pvr_heaps);
951 
952 	/* Region header heap is only present if BRN63142 is present. */
953 	dest = query.heaps.array;
954 	for (size_t i = 0; i < query.heaps.count; i++) {
955 		struct drm_pvr_heap heap = pvr_heaps[i];
956 
957 		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
958 			heap.size = 0;
959 
960 		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
961 		if (err < 0)
962 			return err;
963 
964 		dest += query.heaps.stride;
965 	}
966 
967 copy_out:
968 	err = PVR_UOBJ_SET(args->pointer, args->size, query);
969 	if (err < 0)
970 		return err;
971 
972 	args->size = sizeof(query);
973 	return 0;
974 }
975 
976 /**
977  * pvr_heap_contains_range() - Determine if a given heap contains the specified
978  *                             device-virtual address range.
979  * @pvr_heap: Target heap.
980  * @start: Inclusive start of the target range.
981  * @end: Inclusive end of the target range.
982  *
983  * It is an error to call this function with values of @start and @end that do
984  * not satisfy the condition @start <= @end.
985  */
986 static __always_inline bool
987 pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
988 {
989 	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
990 }
991 
992 /**
993  * pvr_find_heap_containing() - Find a heap which contains the specified
994  *                              device-virtual address range.
995  * @pvr_dev: Target PowerVR device.
996  * @start: Start of the target range.
997  * @size: Size of the target range.
998  *
999  * Return:
1000  *  * A pointer to a constant instance of struct drm_pvr_heap representing the
1001  *    heap containing the entire range specified by @start and @size on
1002  *    success, or
1003  *  * %NULL if no such heap exists.
1004  */
1005 const struct drm_pvr_heap *
1006 pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
1007 {
1008 	u64 end;
1009 
1010 	if (check_add_overflow(start, size - 1, &end))
1011 		return NULL;
1012 
1013 	/*
1014 	 * There are no guarantees about the order of address ranges in
1015 	 * &pvr_heaps, so iterate over the entire array for a heap whose
1016 	 * range completely encompasses the given range.
1017 	 */
1018 	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
1019 		/* Filter out heaps that are only present with an associated quirk. */
1020 		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
1021 		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
1022 			continue;
1023 		}
1024 
1025 		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
1026 			return &pvr_heaps[heap_id];
1027 	}
1028 
1029 	return NULL;
1030 }
1031 
1032 /**
1033  * pvr_vm_find_gem_object() - Look up a buffer object from a given
1034  *                            device-virtual address.
1035  * @vm_ctx: [IN] Target VM context.
1036  * @device_addr: [IN] Virtual device address at the start of the required
1037  *               object.
1038  * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
1039  *                     of the mapped region within the buffer object. May be
1040  *                     %NULL if this information is not required.
1041  * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
1042  *                   region. May be %NULL if this information is not required.
1043  *
1044  * If successful, a reference will be taken on the buffer object. The caller
1045  * must drop the reference with pvr_gem_object_put().
1046  *
1047  * Return:
1048  *  * The PowerVR buffer object mapped at @device_addr if one exists, or
1049  *  * %NULL otherwise.
1050  */
1051 struct pvr_gem_object *
1052 pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
1053 		       u64 *mapped_offset_out, u64 *mapped_size_out)
1054 {
1055 	struct pvr_gem_object *pvr_obj;
1056 	struct drm_gpuva *va;
1057 
1058 	mutex_lock(&vm_ctx->lock);
1059 
1060 	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
1061 	if (!va)
1062 		goto err_unlock;
1063 
1064 	pvr_obj = gem_to_pvr_gem(va->gem.obj);
1065 	pvr_gem_object_get(pvr_obj);
1066 
1067 	if (mapped_offset_out)
1068 		*mapped_offset_out = va->gem.offset;
1069 	if (mapped_size_out)
1070 		*mapped_size_out = va->va.range;
1071 
1072 	mutex_unlock(&vm_ctx->lock);
1073 
1074 	return pvr_obj;
1075 
1076 err_unlock:
1077 	mutex_unlock(&vm_ctx->lock);
1078 
1079 	return NULL;
1080 }
1081 
1082 /**
1083  * pvr_vm_get_fw_mem_context() - Get object representing firmware memory context
1084  * @vm_ctx: Target VM context.
1085  *
1086  * Returns:
1087  *  * FW object representing firmware memory context, or
1088  *  * %NULL if this VM context does not have a firmware memory context.
1089  */
1090 struct pvr_fw_object *
1091 pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
1092 {
1093 	return vm_ctx->fw_mem_ctx_obj;
1094 }
1095