// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_vm.h"

#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_mmu.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_heap_config.h"

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

#include <linux/bug.h>
#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp_types.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/stddef.h>

/**
 * DOC: Memory context
 *
 * This is the "top level" datatype in the VM code. It's exposed in the public
 * API as an opaque handle.
 */

/**
 * struct pvr_vm_context - Context type used to represent a single VM.
 */
struct pvr_vm_context {
	/**
	 * @pvr_dev: The PowerVR device to which this context is bound.
	 * This binding is immutable for the life of the context.
	 */
	struct pvr_device *pvr_dev;

	/** @mmu_ctx: The context for binding to physical memory. */
	struct pvr_mmu_context *mmu_ctx;

	/** @gpuvm_mgr: GPUVM object associated with this context. */
	struct drm_gpuvm gpuvm_mgr;

	/** @lock: Global lock on this VM. */
	struct mutex lock;

	/**
	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
	 * context.
	 */
	struct pvr_fw_object *fw_mem_ctx_obj;

	/** @ref_count: Reference count of object. */
	struct kref ref_count;

	/**
	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
	 * should use the @dummy_gem.resv and not their own _resv field.
	 */
	struct drm_gem_object dummy_gem;
};

static inline
struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
}

struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		kref_get(&vm_ctx->ref_count);

	return vm_ctx;
}

/**
 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
 * page table structure behind a VM context.
 * @vm_ctx: Target VM context.
 */
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
{
	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
}

/**
 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
 * @vm_ctx: Target VM context.
 *
 * This is used to allow private BOs to share a dma_resv for faster fence
 * updates.
 *
 * Returns: The dma_resv pointer.
 */
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->dummy_gem.resv;
}
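
/*
 * Illustrative sketch (not part of the driver): a minimal VM context
 * lifecycle as seen by a caller of this API. Error handling beyond the
 * create step is elided and the calling context is hypothetical.
 *
 *	struct pvr_vm_context *vm_ctx;
 *	struct dma_resv *resv;
 *
 *	vm_ctx = pvr_vm_create_context(pvr_dev, true);
 *	if (IS_ERR(vm_ctx))
 *		return PTR_ERR(vm_ctx);
 *
 *	// Private BOs created against this VM share its reservation object.
 *	resv = pvr_vm_get_dma_resv(vm_ctx);
 *
 *	// ... use the context ...
 *
 *	pvr_vm_context_put(vm_ctx);
 */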

/**
 * DOC: Memory mappings
 */

/**
 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
 */
struct pvr_vm_gpuva {
	/** @base: The wrapped drm_gpuva object. */
	struct drm_gpuva base;
};

#define to_pvr_vm_gpuva(va) container_of_const(va, struct pvr_vm_gpuva, base)

enum pvr_vm_bind_type {
	PVR_VM_BIND_TYPE_MAP,
	PVR_VM_BIND_TYPE_UNMAP,
};

/**
 * struct pvr_vm_bind_op - Context of a map/unmap operation.
 */
struct pvr_vm_bind_op {
	/** @type: Map or unmap. */
	enum pvr_vm_bind_type type;

	/** @pvr_obj: Object associated with mapping (map only). */
	struct pvr_gem_object *pvr_obj;

	/**
	 * @vm_ctx: VM context where the mapping will be created or destroyed.
	 */
	struct pvr_vm_context *vm_ctx;

	/** @mmu_op_ctx: MMU op context. */
	struct pvr_mmu_op_context *mmu_op_ctx;

	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
	struct drm_gpuvm_bo *gpuvm_bo;

	/**
	 * @new_va: Prealloced VA mapping object (init in callback).
	 * Used when creating a mapping.
	 */
	struct pvr_vm_gpuva *new_va;

	/**
	 * @prev_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the beginning into a new mapping.
	 */
	struct pvr_vm_gpuva *prev_va;

	/**
	 * @next_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the end into a new mapping.
	 */
	struct pvr_vm_gpuva *next_va;

	/** @offset: Offset into @pvr_obj to begin mapping from. */
	u64 offset;

	/** @device_addr: Device-virtual address at the start of the mapping. */
	u64 device_addr;

	/** @size: Size of the desired mapping. */
	u64 size;
};

/**
 * pvr_vm_bind_op_exec() - Execute a single bind op.
 * @bind_op: Bind op context.
 *
 * Returns:
 * * 0 on success,
 * * Any error code returned by drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap(), or
 *   a callback function.
 */
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
	switch (bind_op->type) {
	case PVR_VM_BIND_TYPE_MAP:
		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
					bind_op, bind_op->device_addr,
					bind_op->size,
					gem_from_pvr_gem(bind_op->pvr_obj),
					bind_op->offset);

	case PVR_VM_BIND_TYPE_UNMAP:
		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
					  bind_op, bind_op->device_addr,
					  bind_op->size);
	}

	/*
	 * This shouldn't happen unless something went wrong
	 * in drm_sched.
	 */
	WARN_ON(1);
	return -EINVAL;
}
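
/*
 * Illustrative sketch (not part of the driver): the lifetime of a bind op as
 * driven by pvr_vm_map()/pvr_vm_unmap() further down in this file. All error
 * handling and locking details are elided.
 *
 *	struct pvr_vm_bind_op bind_op = {0};
 *
 *	pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj, offset,
 *				device_addr, size);
 *	// Reservation locks are taken via drm_gpuvm_exec_lock() ...
 *	pvr_vm_bind_op_exec(&bind_op);
 *	// ... and dropped via drm_gpuvm_exec_unlock() before cleanup.
 *	pvr_vm_bind_op_fini(&bind_op);
 */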

static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
{
	drm_gpuvm_bo_put(bind_op->gpuvm_bo);

	kfree(bind_op->new_va);
	kfree(bind_op->prev_va);
	kfree(bind_op->next_va);

	if (bind_op->pvr_obj)
		pvr_gem_object_put(bind_op->pvr_obj);

	if (bind_op->mmu_op_ctx)
		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
}

static int
pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
			struct pvr_vm_context *vm_ctx,
			struct pvr_gem_object *pvr_obj, u64 offset,
			u64 device_addr, u64 size)
{
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
	struct sg_table *sgt;
	u64 offset_plus_size;
	int err;

	if (check_add_overflow(offset, size, &offset_plus_size))
		return -EINVAL;

	if (is_user &&
	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
		return -EINVAL;
	}

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_MAP;

	dma_resv_lock(obj->resv, NULL);
	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
	dma_resv_unlock(obj->resv);
	if (IS_ERR(bind_op->gpuvm_bo))
		return PTR_ERR(bind_op->gpuvm_bo);

	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	/* Pin pages so they're ready for use. */
	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
	err = PTR_ERR_OR_ZERO(sgt);
	if (err)
		goto err_bind_op_fini;

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->pvr_obj = pvr_obj;
	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;
	bind_op->offset = offset;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
			  struct pvr_vm_context *vm_ctx, u64 device_addr,
			  u64 size)
{
	int err;

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;

	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

/**
 * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
 * @op: gpuva op containing the map details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_map following a successful mapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 * * 0 on success, or
 * * Any error returned by pvr_mmu_map().
 */
static int
pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
	struct pvr_vm_bind_op *ctx = op_ctx;
	int err;

	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
		return -EINVAL;

	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
			  op->map.va.addr);
	if (err)
		return err;

	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
	ctx->new_va = NULL;

	return 0;
}

/**
 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
 * @op: gpuva op containing the unmap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 * * 0 on success, or
 * * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;

	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
				op->unmap.va->va.range);

	if (err)
		return err;

	drm_gpuva_unmap(&op->unmap);
	drm_gpuva_unlink(op->unmap.va);
	kfree(to_pvr_vm_gpuva(op->unmap.va));

	return 0;
}

/**
 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
 * @op: gpuva op containing the remap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
 * mapping or unmapping operation causes a region to be split. The
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 * * 0 on success, or
 * * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;
	u64 va_start = 0, va_range = 0;
	int err;

	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
	if (err)
		return err;

	/* No actual remap required: the page table tree depth is fixed to 3,
	 * and we use 4k page table entries only for now.
	 */
	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);

	if (op->remap.prev) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
		ctx->prev_va = NULL;
	}

	if (op->remap.next) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
		ctx->next_va = NULL;
	}

	drm_gpuva_unlink(op->remap.unmap->va);
	kfree(to_pvr_vm_gpuva(op->remap.unmap->va));

	return 0;
}
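
/*
 * Illustrative sketch (not part of the driver): how a remap splits an
 * existing mapping when a new map/unmap only covers its middle. The
 * addresses are made up.
 *
 *	before:   [0x10000 ............................ 0x40000)  existing VA
 *	request:            unmap [0x20000 .. 0x30000)
 *	after:    [0x10000 .. 0x20000)          [0x30000 .. 0x40000)
 *	            ctx->prev_va                  ctx->next_va
 *
 * The preallocated prev_va/next_va objects become the surviving halves and
 * the original drm_gpuva is unlinked and freed above.
 */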

/*
 * Public API
 *
 * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
 */

/**
 * pvr_device_addr_is_valid() - Tests whether a device-virtual address
 * is valid.
 * @device_addr: Virtual device address to test.
 *
 * Return:
 * * %true if @device_addr is within the valid range for a device page
 *   table and is aligned to the device page size, or
 * * %false otherwise.
 */
bool
pvr_device_addr_is_valid(u64 device_addr)
{
	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
}

/**
 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
 * address and associated size are both valid.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address to test.
 * @size: Size of the range based at @device_addr to test.
 *
 * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again
 * on @device_addr + @size) to verify a device-virtual address range initially
 * seems intuitive, but it produces a false-negative when the address range
 * is right at the end of device-virtual address space.
 *
 * This function catches that corner case, as well as checking that
 * @size is non-zero.
 *
 * Return:
 * * %true if @device_addr is device page aligned; @size is device page
 *   aligned; the range specified by @device_addr and @size is within the
 *   bounds of the device-virtual address space, and @size is non-zero, or
 * * %false otherwise.
 */
bool
pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
				   u64 device_addr, u64 size)
{
	return pvr_device_addr_is_valid(device_addr) &&
	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
}
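
/*
 * Illustrative sketch (not part of the driver), assuming a 4 KiB device page
 * size: a range such as {device_addr = 0x101000, size = 0x2000} passes the
 * checks above, while {device_addr = 0x101800, size = 0x2000} (misaligned
 * start) or {device_addr = 0x101000, size = 0} (empty range) do not. The
 * actual page size and address-space bounds come from the heap configuration
 * headers.
 */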

static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
{
	kfree(to_pvr_vm_context(gpuvm));
}

static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
	.vm_free = pvr_gpuvm_free,
	.sm_step_map = pvr_vm_gpuva_map,
	.sm_step_remap = pvr_vm_gpuva_remap,
	.sm_step_unmap = pvr_vm_gpuva_unmap,
};

static void
fw_mem_context_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
	struct pvr_vm_context *vm_ctx = priv;

	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
}

/**
 * pvr_vm_create_context() - Create a new VM context.
 * @pvr_dev: Target PowerVR device.
 * @is_userspace_context: %true if this context is for userspace. This will
 *                        create a firmware memory context for the VM context
 *                        and disable warnings when tearing down mappings.
 *
 * Return:
 * * A handle to the newly-minted VM context on success,
 * * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
 *   missing or has an unsupported value,
 * * -%ENOMEM if allocation of the structure behind the opaque handle fails,
 *   or
 * * Any error encountered while setting up internal structures.
 */
struct pvr_vm_context *
pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);

	struct pvr_vm_context *vm_ctx;
	u16 device_addr_bits;

	int err;

	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
				&device_addr_bits);
	if (err) {
		drm_err(drm_dev,
			"Failed to get device virtual address space bits\n");
		return ERR_PTR(err);
	}

	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
		drm_err(drm_dev,
			"Device has unsupported virtual address space size\n");
		return ERR_PTR(-EINVAL);
	}

	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
	if (!vm_ctx)
		return ERR_PTR(-ENOMEM);

	vm_ctx->pvr_dev = pvr_dev;

	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
	if (err)
		goto err_free;

	if (is_userspace_context) {
		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);

		if (err)
			goto err_page_table_destroy;
	}

	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);
	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);

	mutex_init(&vm_ctx->lock);
	kref_init(&vm_ctx->ref_count);

	return vm_ctx;

err_page_table_destroy:
	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);

err_free:
	kfree(vm_ctx);

	return ERR_PTR(err);
}
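
/*
 * Illustrative sketch (not part of the driver): how a create ioctl might
 * mint a userspace VM context and publish it through the per-file handle
 * XArray consumed by pvr_vm_context_lookup() below. The pvr_dev variable and
 * the xa_limit chosen here are assumptions made for the sketch.
 *
 *	struct pvr_vm_context *vm_ctx;
 *	u32 handle;
 *	int err;
 *
 *	vm_ctx = pvr_vm_create_context(pvr_dev, true);
 *	if (IS_ERR(vm_ctx))
 *		return PTR_ERR(vm_ctx);
 *
 *	err = xa_alloc(&pvr_file->vm_ctx_handles, &handle, vm_ctx,
 *		       xa_limit_32b, GFP_KERNEL);
 *	if (err)
 *		pvr_vm_context_put(vm_ctx);
 */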

/**
 * pvr_vm_unmap_all() - Unmap all mappings associated with a VM context.
 * @vm_ctx: Target VM context.
 *
 * This function ensures that no mappings are left dangling by unmapping them
 * all in order of ascending device-virtual address.
 */
void
pvr_vm_unmap_all(struct pvr_vm_context *vm_ctx)
{
	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
			     vm_ctx->gpuvm_mgr.mm_range));
}

/**
 * pvr_vm_context_release() - Teardown a VM context.
 * @ref_count: Pointer to reference counter of the VM context.
 *
 * This function also ensures that no mappings are left dangling by calling
 * pvr_vm_unmap_all().
 */
static void
pvr_vm_context_release(struct kref *ref_count)
{
	struct pvr_vm_context *vm_ctx =
		container_of(ref_count, struct pvr_vm_context, ref_count);

	if (vm_ctx->fw_mem_ctx_obj)
		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);

	pvr_vm_unmap_all(vm_ctx);

	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
	mutex_destroy(&vm_ctx->lock);

	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
}

/**
 * pvr_vm_context_lookup() - Look up VM context from handle
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Object handle.
 *
 * Takes reference on VM context object. Call pvr_vm_context_put() to release.
 *
 * Returns:
 * * The requested object on success, or
 * * %NULL on failure (object does not exist in list, or is not a VM context)
 */
struct pvr_vm_context *
pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_vm_context *vm_ctx;

	xa_lock(&pvr_file->vm_ctx_handles);
	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
	pvr_vm_context_get(vm_ctx);
	xa_unlock(&pvr_file->vm_ctx_handles);

	return vm_ctx;
}
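
/*
 * Illustrative sketch (not part of the driver): a typical ioctl-style lookup
 * of a VM context by handle. The args structure and its field name are
 * hypothetical.
 *
 *	struct pvr_vm_context *vm_ctx;
 *
 *	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
 *	if (!vm_ctx)
 *		return -EINVAL;
 *
 *	// ... operate on vm_ctx ...
 *
 *	pvr_vm_context_put(vm_ctx);
 */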

/**
 * pvr_vm_context_put() - Release a reference on a VM context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 * * %true if the VM context was destroyed, or
 * * %false if there are any references still remaining.
 */
bool
pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);

	return true;
}

/**
 * pvr_destroy_vm_contexts_for_file() - Destroy any VM contexts associated
 * with the given file.
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all vm_contexts associated with @pvr_file from the device VM context
 * list and drops initial references. vm_contexts will then be destroyed once
 * all outstanding references are dropped.
 */
void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_vm_context *vm_ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
		/* vm_ctx is not used here because that would create a race with xa_erase */
		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
	}
}

static int
pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
{
	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;

	/* Unmap operations don't have an object to lock. */
	if (!pvr_obj)
		return 0;

	/* Acquire lock on the GEM being mapped. */
	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}

/**
 * pvr_vm_map() - Map a section of physical memory into a section of
 * device-virtual memory.
 * @vm_ctx: Target VM context.
 * @pvr_obj: Target PowerVR memory object.
 * @pvr_obj_offset: Offset into @pvr_obj to map from.
 * @device_addr: Virtual device address at the start of the requested mapping.
 * @size: Size of the requested mapping.
 *
 * No handle is returned to represent the mapping. Instead, callers should
 * remember @device_addr and use that as a handle.
 *
 * Return:
 * * 0 on success,
 * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *   address; the region specified by @pvr_obj_offset and @size does not fall
 *   entirely within @pvr_obj, or any part of the specified region of @pvr_obj
 *   is not device-virtual page-aligned,
 * * Any error encountered while performing internal operations required to
 *   create the mapping (returned from pvr_vm_gpuva_map or
 *   pvr_vm_gpuva_remap).
 */
int
pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
	   u64 pvr_obj_offset, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
					  pvr_obj_offset, device_addr,
					  size);

	if (err)
		return err;

	pvr_gem_object_get(pvr_obj);

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}
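
/*
 * Illustrative sketch (not part of the driver): mapping a whole BO and
 * tearing the mapping down again. The device address used here is made up
 * and would normally come from a userspace allocator working within a heap.
 *
 *	const u64 device_addr = ROGUE_GENERAL_HEAP_BASE;
 *	int err;
 *
 *	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr,
 *			 pvr_gem_object_size(pvr_obj));
 *	if (err)
 *		return err;
 *
 *	// The device address doubles as the handle for the mapping.
 *	err = pvr_vm_unmap(vm_ctx, device_addr, pvr_gem_object_size(pvr_obj));
 */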

/**
 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address at the start of the target mapping.
 * @size: Size of the target mapping.
 *
 * Return:
 * * 0 on success,
 * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *   address,
 * * Any error encountered while performing internal operations required to
 *   destroy the mapping (returned from pvr_vm_gpuva_unmap or
 *   pvr_vm_gpuva_remap).
 */
int
pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
					    size);
	if (err)
		return err;

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}

/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 128,
		.size = 1024,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 128,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
		.offset = 0,
		.size = 128,
	},
};

#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)

/*
 * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
 * static data area for each heap.
 */
static const struct drm_pvr_heap pvr_heaps[] = {
	[DRM_PVR_HEAP_GENERAL] = {
		.base = ROGUE_GENERAL_HEAP_BASE,
		.size = ROGUE_GENERAL_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_USC_CODE] = {
		.base = ROGUE_USCCODE_HEAP_BASE,
		.size = ROGUE_USCCODE_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_RGNHDR] = {
		.base = ROGUE_RGNHDR_HEAP_BASE,
		.size = ROGUE_RGNHDR_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_VIS_TEST] = {
		.base = ROGUE_VISTEST_HEAP_BASE,
		.size = ROGUE_VISTEST_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
};

int
pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
			  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_static_data_areas query = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.static_data_areas.array) {
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
		goto copy_out;
	}

	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);

	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
	if (err < 0)
		return err;

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

int
pvr_heap_info_get(const struct pvr_device *pvr_dev,
		  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_heap_info query = {0};
	u64 dest;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.heaps.array) {
		query.heaps.count = ARRAY_SIZE(pvr_heaps);
		query.heaps.stride = sizeof(struct drm_pvr_heap);
		goto copy_out;
	}

	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
		query.heaps.count = ARRAY_SIZE(pvr_heaps);

	/* Region header heap is only present if BRN63142 is present. */
	dest = query.heaps.array;
	for (size_t i = 0; i < query.heaps.count; i++) {
		struct drm_pvr_heap heap = pvr_heaps[i];

		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
			heap.size = 0;

		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
		if (err < 0)
			return err;

		dest += query.heaps.stride;
	}

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}
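
/*
 * Illustrative sketch (not part of the driver): the two-pass protocol both
 * query helpers above implement. A caller first passes a NULL pointer (or a
 * NULL array) to discover sizes, then calls again with storage allocated.
 * Dispatch through the dev-query ioctl is elided; only the pattern is shown.
 *
 *	struct drm_pvr_ioctl_dev_query_args args = { .pointer = 0 };
 *
 *	pvr_heap_info_get(pvr_dev, &args);	// pass 1: args.size is set
 *	// The caller then allocates args.size bytes plus a heap array sized
 *	// from query.heaps.count/stride and repeats the call to fill them in.
 */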

/**
 * pvr_heap_contains_range() - Determine if a given heap contains the specified
 * device-virtual address range.
 * @pvr_heap: Target heap.
 * @start: Inclusive start of the target range.
 * @end: Inclusive end of the target range.
 *
 * It is an error to call this function with values of @start and @end that do
 * not satisfy the condition @start <= @end.
 */
static __always_inline bool
pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
{
	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
}

/**
 * pvr_find_heap_containing() - Find a heap which contains the specified
 * device-virtual address range.
 * @pvr_dev: Target PowerVR device.
 * @start: Start of the target range.
 * @size: Size of the target range.
 *
 * Return:
 * * A pointer to a constant instance of struct drm_pvr_heap representing the
 *   heap containing the entire range specified by @start and @size on
 *   success, or
 * * %NULL if no such heap exists.
 */
const struct drm_pvr_heap *
pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
{
	u64 end;

	if (check_add_overflow(start, size - 1, &end))
		return NULL;

	/*
	 * There are no guarantees about the order of address ranges in
	 * &pvr_heaps, so iterate over the entire array for a heap whose
	 * range completely encompasses the given range.
	 */
	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
		/* Filter heaps that present only with an associated quirk */
		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
			continue;
		}

		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
			return &pvr_heaps[heap_id];
	}

	return NULL;
}
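
/*
 * Illustrative sketch (not part of the driver): validating that a requested
 * user mapping falls entirely inside a known heap, mirroring the check in
 * pvr_vm_bind_op_map_init() above.
 *
 *	const struct drm_pvr_heap *heap;
 *
 *	heap = pvr_find_heap_containing(pvr_dev, device_addr, size);
 *	if (!heap)
 *		return -EINVAL;	// range spans no heap, or straddles heaps
 */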

/**
 * pvr_vm_find_gem_object() - Look up a buffer object from a given
 * device-virtual address.
 * @vm_ctx: [IN] Target VM context.
 * @device_addr: [IN] Virtual device address at the start of the required
 *               object.
 * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
 *                     of the mapped region within the buffer object. May be
 *                     %NULL if this information is not required.
 * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
 *                   region. May be %NULL if this information is not required.
 *
 * If successful, a reference will be taken on the buffer object. The caller
 * must drop the reference with pvr_gem_object_put().
 *
 * Return:
 * * The PowerVR buffer object mapped at @device_addr if one exists, or
 * * %NULL otherwise.
 */
struct pvr_gem_object *
pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
		       u64 *mapped_offset_out, u64 *mapped_size_out)
{
	struct pvr_gem_object *pvr_obj;
	struct drm_gpuva *va;

	mutex_lock(&vm_ctx->lock);

	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
	if (!va)
		goto err_unlock;

	pvr_obj = gem_to_pvr_gem(va->gem.obj);
	pvr_gem_object_get(pvr_obj);

	if (mapped_offset_out)
		*mapped_offset_out = va->gem.offset;
	if (mapped_size_out)
		*mapped_size_out = va->va.range;

	mutex_unlock(&vm_ctx->lock);

	return pvr_obj;

err_unlock:
	mutex_unlock(&vm_ctx->lock);

	return NULL;
}
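
/*
 * Illustrative sketch (not part of the driver): resolving a device-virtual
 * address back to its backing BO and releasing the reference once done.
 *
 *	struct pvr_gem_object *pvr_obj;
 *	u64 mapped_offset, mapped_size;
 *
 *	pvr_obj = pvr_vm_find_gem_object(vm_ctx, device_addr,
 *					 &mapped_offset, &mapped_size);
 *	if (!pvr_obj)
 *		return -ENOENT;
 *
 *	// ... inspect the mapping ...
 *
 *	pvr_gem_object_put(pvr_obj);
 */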

/**
 * pvr_vm_get_fw_mem_context() - Get object representing firmware memory
 * context.
 * @vm_ctx: Target VM context.
 *
 * Returns:
 * * FW object representing firmware memory context, or
 * * %NULL if this VM context does not have a firmware memory context.
 */
struct pvr_fw_object *
pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->fw_mem_ctx_obj;
}