1 // SPDX-License-Identifier: GPL-2.0-only OR MIT 2 /* Copyright (c) 2023 Imagination Technologies Ltd. */ 3 4 #include "pvr_vm.h" 5 6 #include "pvr_device.h" 7 #include "pvr_drv.h" 8 #include "pvr_gem.h" 9 #include "pvr_mmu.h" 10 #include "pvr_rogue_fwif.h" 11 #include "pvr_rogue_heap_config.h" 12 13 #include <drm/drm_exec.h> 14 #include <drm/drm_gem.h> 15 #include <drm/drm_gpuvm.h> 16 17 #include <linux/container_of.h> 18 #include <linux/err.h> 19 #include <linux/errno.h> 20 #include <linux/gfp_types.h> 21 #include <linux/kref.h> 22 #include <linux/mutex.h> 23 #include <linux/stddef.h> 24 25 /** 26 * DOC: Memory context 27 * 28 * This is the "top level" datatype in the VM code. It's exposed in the public 29 * API as an opaque handle. 30 */ 31 32 /** 33 * struct pvr_vm_context - Context type used to represent a single VM. 34 */ 35 struct pvr_vm_context { 36 /** 37 * @pvr_dev: The PowerVR device to which this context is bound. 38 * This binding is immutable for the life of the context. 39 */ 40 struct pvr_device *pvr_dev; 41 42 /** @mmu_ctx: The context for binding to physical memory. */ 43 struct pvr_mmu_context *mmu_ctx; 44 45 /** @gpuvm_mgr: GPUVM object associated with this context. */ 46 struct drm_gpuvm gpuvm_mgr; 47 48 /** @lock: Global lock on this VM. */ 49 struct mutex lock; 50 51 /** 52 * @fw_mem_ctx_obj: Firmware object representing firmware memory 53 * context. 54 */ 55 struct pvr_fw_object *fw_mem_ctx_obj; 56 57 /** @ref_count: Reference count of object. */ 58 struct kref ref_count; 59 60 /** 61 * @dummy_gem: GEM object to enable VM reservation. All private BOs 62 * should use the @dummy_gem.resv and not their own _resv field. 63 */ 64 struct drm_gem_object dummy_gem; 65 }; 66 67 static inline 68 struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm) 69 { 70 return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr); 71 } 72 73 struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx) 74 { 75 if (vm_ctx) 76 kref_get(&vm_ctx->ref_count); 77 78 return vm_ctx; 79 } 80 81 /** 82 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the 83 * page table structure behind a VM context. 84 * @vm_ctx: Target VM context. 85 */ 86 dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx) 87 { 88 return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx); 89 } 90 91 /** 92 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context. 93 * @vm_ctx: Target VM context. 94 * 95 * This is used to allow private BOs to share a dma_resv for faster fence 96 * updates. 97 * 98 * Returns: The dma_resv pointer. 99 */ 100 struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx) 101 { 102 return vm_ctx->dummy_gem.resv; 103 } 104 105 /** 106 * DOC: Memory mappings 107 */ 108 109 /** 110 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping. 111 */ 112 struct pvr_vm_gpuva { 113 /** @base: The wrapped drm_gpuva object. */ 114 struct drm_gpuva base; 115 }; 116 117 static __always_inline 118 struct pvr_vm_gpuva *to_pvr_vm_gpuva(struct drm_gpuva *gpuva) 119 { 120 return container_of(gpuva, struct pvr_vm_gpuva, base); 121 } 122 123 enum pvr_vm_bind_type { 124 PVR_VM_BIND_TYPE_MAP, 125 PVR_VM_BIND_TYPE_UNMAP, 126 }; 127 128 /** 129 * struct pvr_vm_bind_op - Context of a map/unmap operation. 130 */ 131 struct pvr_vm_bind_op { 132 /** @type: Map or unmap. */ 133 enum pvr_vm_bind_type type; 134 135 /** @pvr_obj: Object associated with mapping (map only). */ 136 struct pvr_gem_object *pvr_obj; 137 138 /** 139 * @vm_ctx: VM context where the mapping will be created or destroyed. 140 */ 141 struct pvr_vm_context *vm_ctx; 142 143 /** @mmu_op_ctx: MMU op context. */ 144 struct pvr_mmu_op_context *mmu_op_ctx; 145 146 /** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */ 147 struct drm_gpuvm_bo *gpuvm_bo; 148 149 /** 150 * @new_va: Prealloced VA mapping object (init in callback). 151 * Used when creating a mapping. 152 */ 153 struct pvr_vm_gpuva *new_va; 154 155 /** 156 * @prev_va: Prealloced VA mapping object (init in callback). 157 * Used when a mapping or unmapping operation overlaps an existing 158 * mapping and splits away the beginning into a new mapping. 159 */ 160 struct pvr_vm_gpuva *prev_va; 161 162 /** 163 * @next_va: Prealloced VA mapping object (init in callback). 164 * Used when a mapping or unmapping operation overlaps an existing 165 * mapping and splits away the end into a new mapping. 166 */ 167 struct pvr_vm_gpuva *next_va; 168 169 /** @offset: Offset into @pvr_obj to begin mapping from. */ 170 u64 offset; 171 172 /** @device_addr: Device-virtual address at the start of the mapping. */ 173 u64 device_addr; 174 175 /** @size: Size of the desired mapping. */ 176 u64 size; 177 }; 178 179 /** 180 * pvr_vm_bind_op_exec() - Execute a single bind op. 181 * @bind_op: Bind op context. 182 * 183 * Returns: 184 * * 0 on success, 185 * * Any error code returned by drm_gpuva_sm_map(), drm_gpuva_sm_unmap(), or 186 * a callback function. 187 */ 188 static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op) 189 { 190 switch (bind_op->type) { 191 case PVR_VM_BIND_TYPE_MAP: 192 return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr, 193 bind_op, bind_op->device_addr, 194 bind_op->size, 195 gem_from_pvr_gem(bind_op->pvr_obj), 196 bind_op->offset); 197 198 case PVR_VM_BIND_TYPE_UNMAP: 199 return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr, 200 bind_op, bind_op->device_addr, 201 bind_op->size); 202 } 203 204 /* 205 * This shouldn't happen unless something went wrong 206 * in drm_sched. 207 */ 208 WARN_ON(1); 209 return -EINVAL; 210 } 211 212 static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op) 213 { 214 drm_gpuvm_bo_put(bind_op->gpuvm_bo); 215 216 kfree(bind_op->new_va); 217 kfree(bind_op->prev_va); 218 kfree(bind_op->next_va); 219 220 if (bind_op->pvr_obj) 221 pvr_gem_object_put(bind_op->pvr_obj); 222 223 if (bind_op->mmu_op_ctx) 224 pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx); 225 } 226 227 static int 228 pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op, 229 struct pvr_vm_context *vm_ctx, 230 struct pvr_gem_object *pvr_obj, u64 offset, 231 u64 device_addr, u64 size) 232 { 233 struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj); 234 const bool is_user = vm_ctx == vm_ctx->pvr_dev->kernel_vm_ctx; 235 const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj); 236 struct sg_table *sgt; 237 u64 offset_plus_size; 238 int err; 239 240 if (check_add_overflow(offset, size, &offset_plus_size)) 241 return -EINVAL; 242 243 if (is_user && 244 !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) { 245 return -EINVAL; 246 } 247 248 if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) || 249 offset & ~PAGE_MASK || size & ~PAGE_MASK || 250 offset >= pvr_obj_size || offset_plus_size > pvr_obj_size) 251 return -EINVAL; 252 253 bind_op->type = PVR_VM_BIND_TYPE_MAP; 254 255 dma_resv_lock(obj->resv, NULL); 256 bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj); 257 dma_resv_unlock(obj->resv); 258 if (IS_ERR(bind_op->gpuvm_bo)) 259 return PTR_ERR(bind_op->gpuvm_bo); 260 261 bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL); 262 bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL); 263 bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL); 264 if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) { 265 err = -ENOMEM; 266 goto err_bind_op_fini; 267 } 268 269 /* Pin pages so they're ready for use. */ 270 sgt = pvr_gem_object_get_pages_sgt(pvr_obj); 271 err = PTR_ERR_OR_ZERO(sgt); 272 if (err) 273 goto err_bind_op_fini; 274 275 bind_op->mmu_op_ctx = 276 pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size); 277 err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx); 278 if (err) { 279 bind_op->mmu_op_ctx = NULL; 280 goto err_bind_op_fini; 281 } 282 283 bind_op->pvr_obj = pvr_obj; 284 bind_op->vm_ctx = vm_ctx; 285 bind_op->device_addr = device_addr; 286 bind_op->size = size; 287 bind_op->offset = offset; 288 289 return 0; 290 291 err_bind_op_fini: 292 pvr_vm_bind_op_fini(bind_op); 293 294 return err; 295 } 296 297 static int 298 pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op, 299 struct pvr_vm_context *vm_ctx, u64 device_addr, 300 u64 size) 301 { 302 int err; 303 304 if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size)) 305 return -EINVAL; 306 307 bind_op->type = PVR_VM_BIND_TYPE_UNMAP; 308 309 bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL); 310 bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL); 311 if (!bind_op->prev_va || !bind_op->next_va) { 312 err = -ENOMEM; 313 goto err_bind_op_fini; 314 } 315 316 bind_op->mmu_op_ctx = 317 pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0); 318 err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx); 319 if (err) { 320 bind_op->mmu_op_ctx = NULL; 321 goto err_bind_op_fini; 322 } 323 324 bind_op->vm_ctx = vm_ctx; 325 bind_op->device_addr = device_addr; 326 bind_op->size = size; 327 328 return 0; 329 330 err_bind_op_fini: 331 pvr_vm_bind_op_fini(bind_op); 332 333 return err; 334 } 335 336 static int 337 pvr_vm_bind_op_lock_resvs(struct drm_exec *exec, struct pvr_vm_bind_op *bind_op) 338 { 339 drm_exec_until_all_locked(exec) { 340 struct drm_gem_object *r_obj = &bind_op->vm_ctx->dummy_gem; 341 struct drm_gpuvm *gpuvm = &bind_op->vm_ctx->gpuvm_mgr; 342 struct pvr_gem_object *pvr_obj = bind_op->pvr_obj; 343 struct drm_gpuvm_bo *gpuvm_bo; 344 345 /* Acquire lock on the vm_context's reserve object. */ 346 int err = drm_exec_lock_obj(exec, r_obj); 347 348 drm_exec_retry_on_contention(exec); 349 if (err) 350 return err; 351 352 /* Acquire lock on all BOs in the context. */ 353 list_for_each_entry(gpuvm_bo, &gpuvm->extobj.list, 354 list.entry.extobj) { 355 err = drm_exec_lock_obj(exec, gpuvm_bo->obj); 356 357 drm_exec_retry_on_contention(exec); 358 if (err) 359 return err; 360 } 361 362 /* Unmap operations don't have an object to lock. */ 363 if (!pvr_obj) 364 break; 365 366 /* Acquire lock on the GEM being mapped. */ 367 err = drm_exec_lock_obj(exec, 368 gem_from_pvr_gem(bind_op->pvr_obj)); 369 370 drm_exec_retry_on_contention(exec); 371 if (err) 372 return err; 373 } 374 375 return 0; 376 } 377 378 /** 379 * pvr_vm_gpuva_map() - Insert a mapping into a memory context. 380 * @op: gpuva op containing the remap details. 381 * @op_ctx: Operation context. 382 * 383 * Context: Called by drm_gpuvm_sm_map following a successful mapping while 384 * @op_ctx.vm_ctx mutex is held. 385 * 386 * Return: 387 * * 0 on success, or 388 * * Any error returned by pvr_mmu_map(). 389 */ 390 static int 391 pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx) 392 { 393 struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj); 394 struct pvr_vm_bind_op *ctx = op_ctx; 395 int err; 396 397 if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK) 398 return -EINVAL; 399 400 err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags, 401 op->map.va.addr); 402 if (err) 403 return err; 404 405 drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map); 406 drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo); 407 ctx->new_va = NULL; 408 409 return 0; 410 } 411 412 /** 413 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context. 414 * @op: gpuva op containing the unmap details. 415 * @op_ctx: Operation context. 416 * 417 * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while 418 * @op_ctx.vm_ctx mutex is held. 419 * 420 * Return: 421 * * 0 on success, or 422 * * Any error returned by pvr_mmu_unmap(). 423 */ 424 static int 425 pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx) 426 { 427 struct pvr_vm_bind_op *ctx = op_ctx; 428 429 int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr, 430 op->unmap.va->va.range); 431 432 if (err) 433 return err; 434 435 drm_gpuva_unmap(&op->unmap); 436 drm_gpuva_unlink(op->unmap.va); 437 438 return 0; 439 } 440 441 /** 442 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context. 443 * @op: gpuva op containing the remap details. 444 * @op_ctx: Operation context. 445 * 446 * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a 447 * mapping or unmapping operation causes a region to be split. The 448 * @op_ctx.vm_ctx mutex is held. 449 * 450 * Return: 451 * * 0 on success, or 452 * * Any error returned by pvr_vm_gpuva_unmap() or pvr_vm_gpuva_unmap(). 453 */ 454 static int 455 pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx) 456 { 457 struct pvr_vm_bind_op *ctx = op_ctx; 458 u64 va_start = 0, va_range = 0; 459 int err; 460 461 drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range); 462 err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range); 463 if (err) 464 return err; 465 466 /* No actual remap required: the page table tree depth is fixed to 3, 467 * and we use 4k page table entries only for now. 468 */ 469 drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap); 470 471 if (op->remap.prev) { 472 pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj)); 473 drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo); 474 ctx->prev_va = NULL; 475 } 476 477 if (op->remap.next) { 478 pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj)); 479 drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo); 480 ctx->next_va = NULL; 481 } 482 483 drm_gpuva_unlink(op->remap.unmap->va); 484 485 return 0; 486 } 487 488 /* 489 * Public API 490 * 491 * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h". 492 */ 493 494 /** 495 * pvr_device_addr_is_valid() - Tests whether a device-virtual address 496 * is valid. 497 * @device_addr: Virtual device address to test. 498 * 499 * Return: 500 * * %true if @device_addr is within the valid range for a device page 501 * table and is aligned to the device page size, or 502 * * %false otherwise. 503 */ 504 bool 505 pvr_device_addr_is_valid(u64 device_addr) 506 { 507 return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 && 508 (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0; 509 } 510 511 /** 512 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual 513 * address and associated size are both valid. 514 * @vm_ctx: Target VM context. 515 * @device_addr: Virtual device address to test. 516 * @size: Size of the range based at @device_addr to test. 517 * 518 * Calling pvr_device_addr_is_valid() twice (once on @size, and again on 519 * @device_addr + @size) to verify a device-virtual address range initially 520 * seems intuitive, but it produces a false-negative when the address range 521 * is right at the end of device-virtual address space. 522 * 523 * This function catches that corner case, as well as checking that 524 * @size is non-zero. 525 * 526 * Return: 527 * * %true if @device_addr is device page aligned; @size is device page 528 * aligned; the range specified by @device_addr and @size is within the 529 * bounds of the device-virtual address space, and @size is non-zero, or 530 * * %false otherwise. 531 */ 532 bool 533 pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx, 534 u64 device_addr, u64 size) 535 { 536 return pvr_device_addr_is_valid(device_addr) && 537 drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) && 538 size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 && 539 (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE); 540 } 541 542 void pvr_gpuvm_free(struct drm_gpuvm *gpuvm) 543 { 544 kfree(to_pvr_vm_context(gpuvm)); 545 } 546 547 static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = { 548 .vm_free = pvr_gpuvm_free, 549 .sm_step_map = pvr_vm_gpuva_map, 550 .sm_step_remap = pvr_vm_gpuva_remap, 551 .sm_step_unmap = pvr_vm_gpuva_unmap, 552 }; 553 554 static void 555 fw_mem_context_init(void *cpu_ptr, void *priv) 556 { 557 struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr; 558 struct pvr_vm_context *vm_ctx = priv; 559 560 fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx); 561 fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET; 562 } 563 564 /** 565 * pvr_vm_create_context() - Create a new VM context. 566 * @pvr_dev: Target PowerVR device. 567 * @is_userspace_context: %true if this context is for userspace. This will 568 * create a firmware memory context for the VM context 569 * and disable warnings when tearing down mappings. 570 * 571 * Return: 572 * * A handle to the newly-minted VM context on success, 573 * * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is 574 * missing or has an unsupported value, 575 * * -%ENOMEM if allocation of the structure behind the opaque handle fails, 576 * or 577 * * Any error encountered while setting up internal structures. 578 */ 579 struct pvr_vm_context * 580 pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context) 581 { 582 struct drm_device *drm_dev = from_pvr_device(pvr_dev); 583 584 struct pvr_vm_context *vm_ctx; 585 u16 device_addr_bits; 586 587 int err; 588 589 err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits, 590 &device_addr_bits); 591 if (err) { 592 drm_err(drm_dev, 593 "Failed to get device virtual address space bits\n"); 594 return ERR_PTR(err); 595 } 596 597 if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) { 598 drm_err(drm_dev, 599 "Device has unsupported virtual address space size\n"); 600 return ERR_PTR(-EINVAL); 601 } 602 603 vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL); 604 if (!vm_ctx) 605 return ERR_PTR(-ENOMEM); 606 607 drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0); 608 609 vm_ctx->pvr_dev = pvr_dev; 610 kref_init(&vm_ctx->ref_count); 611 mutex_init(&vm_ctx->lock); 612 613 drm_gpuvm_init(&vm_ctx->gpuvm_mgr, 614 is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM", 615 0, &pvr_dev->base, &vm_ctx->dummy_gem, 616 0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops); 617 618 vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev); 619 err = PTR_ERR_OR_ZERO(&vm_ctx->mmu_ctx); 620 if (err) { 621 vm_ctx->mmu_ctx = NULL; 622 goto err_put_ctx; 623 } 624 625 if (is_userspace_context) { 626 err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext), 627 PVR_BO_FW_FLAGS_DEVICE_UNCACHED, 628 fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj); 629 630 if (err) 631 goto err_page_table_destroy; 632 } 633 634 return vm_ctx; 635 636 err_page_table_destroy: 637 pvr_mmu_context_destroy(vm_ctx->mmu_ctx); 638 639 err_put_ctx: 640 pvr_vm_context_put(vm_ctx); 641 642 return ERR_PTR(err); 643 } 644 645 /** 646 * pvr_vm_context_release() - Teardown a VM context. 647 * @ref_count: Pointer to reference counter of the VM context. 648 * 649 * This function ensures that no mappings are left dangling by unmapping them 650 * all in order of ascending device-virtual address. 651 */ 652 static void 653 pvr_vm_context_release(struct kref *ref_count) 654 { 655 struct pvr_vm_context *vm_ctx = 656 container_of(ref_count, struct pvr_vm_context, ref_count); 657 658 if (vm_ctx->fw_mem_ctx_obj) 659 pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj); 660 661 WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start, 662 vm_ctx->gpuvm_mgr.mm_range)); 663 664 pvr_mmu_context_destroy(vm_ctx->mmu_ctx); 665 drm_gem_private_object_fini(&vm_ctx->dummy_gem); 666 mutex_destroy(&vm_ctx->lock); 667 668 drm_gpuvm_put(&vm_ctx->gpuvm_mgr); 669 } 670 671 /** 672 * pvr_vm_context_lookup() - Look up VM context from handle 673 * @pvr_file: Pointer to pvr_file structure. 674 * @handle: Object handle. 675 * 676 * Takes reference on VM context object. Call pvr_vm_context_put() to release. 677 * 678 * Returns: 679 * * The requested object on success, or 680 * * %NULL on failure (object does not exist in list, or is not a VM context) 681 */ 682 struct pvr_vm_context * 683 pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle) 684 { 685 struct pvr_vm_context *vm_ctx; 686 687 xa_lock(&pvr_file->vm_ctx_handles); 688 vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle); 689 if (vm_ctx) 690 kref_get(&vm_ctx->ref_count); 691 692 xa_unlock(&pvr_file->vm_ctx_handles); 693 694 return vm_ctx; 695 } 696 697 /** 698 * pvr_vm_context_put() - Release a reference on a VM context 699 * @vm_ctx: Target VM context. 700 * 701 * Returns: 702 * * %true if the VM context was destroyed, or 703 * * %false if there are any references still remaining. 704 */ 705 bool 706 pvr_vm_context_put(struct pvr_vm_context *vm_ctx) 707 { 708 if (vm_ctx) 709 return kref_put(&vm_ctx->ref_count, pvr_vm_context_release); 710 711 return true; 712 } 713 714 /** 715 * pvr_destroy_vm_contexts_for_file: Destroy any VM contexts associated with the 716 * given file. 717 * @pvr_file: Pointer to pvr_file structure. 718 * 719 * Removes all vm_contexts associated with @pvr_file from the device VM context 720 * list and drops initial references. vm_contexts will then be destroyed once 721 * all outstanding references are dropped. 722 */ 723 void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file) 724 { 725 struct pvr_vm_context *vm_ctx; 726 unsigned long handle; 727 728 xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) { 729 /* vm_ctx is not used here because that would create a race with xa_erase */ 730 pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle)); 731 } 732 } 733 734 /** 735 * pvr_vm_map() - Map a section of physical memory into a section of 736 * device-virtual memory. 737 * @vm_ctx: Target VM context. 738 * @pvr_obj: Target PowerVR memory object. 739 * @pvr_obj_offset: Offset into @pvr_obj to map from. 740 * @device_addr: Virtual device address at the start of the requested mapping. 741 * @size: Size of the requested mapping. 742 * 743 * No handle is returned to represent the mapping. Instead, callers should 744 * remember @device_addr and use that as a handle. 745 * 746 * Return: 747 * * 0 on success, 748 * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual 749 * address; the region specified by @pvr_obj_offset and @size does not fall 750 * entirely within @pvr_obj, or any part of the specified region of @pvr_obj 751 * is not device-virtual page-aligned, 752 * * Any error encountered while performing internal operations required to 753 * destroy the mapping (returned from pvr_vm_gpuva_map or 754 * pvr_vm_gpuva_remap). 755 */ 756 int 757 pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj, 758 u64 pvr_obj_offset, u64 device_addr, u64 size) 759 { 760 struct pvr_vm_bind_op bind_op = {0}; 761 struct drm_exec exec; 762 763 int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj, 764 pvr_obj_offset, device_addr, 765 size); 766 767 if (err) 768 return err; 769 770 drm_exec_init(&exec, 771 DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES); 772 773 pvr_gem_object_get(pvr_obj); 774 775 err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op); 776 if (err) 777 goto err_cleanup; 778 779 err = pvr_vm_bind_op_exec(&bind_op); 780 781 drm_exec_fini(&exec); 782 783 err_cleanup: 784 pvr_vm_bind_op_fini(&bind_op); 785 786 return err; 787 } 788 789 /** 790 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory. 791 * @vm_ctx: Target VM context. 792 * @device_addr: Virtual device address at the start of the target mapping. 793 * @size: Size of the target mapping. 794 * 795 * Return: 796 * * 0 on success, 797 * * -%EINVAL if @device_addr is not a valid page-aligned device-virtual 798 * address, 799 * * Any error encountered while performing internal operations required to 800 * destroy the mapping (returned from pvr_vm_gpuva_unmap or 801 * pvr_vm_gpuva_remap). 802 */ 803 int 804 pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size) 805 { 806 struct pvr_vm_bind_op bind_op = {0}; 807 struct drm_exec exec; 808 809 int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr, 810 size); 811 812 if (err) 813 return err; 814 815 drm_exec_init(&exec, 816 DRM_EXEC_INTERRUPTIBLE_WAIT | DRM_EXEC_IGNORE_DUPLICATES); 817 818 err = pvr_vm_bind_op_lock_resvs(&exec, &bind_op); 819 if (err) 820 goto err_cleanup; 821 822 err = pvr_vm_bind_op_exec(&bind_op); 823 824 drm_exec_fini(&exec); 825 826 err_cleanup: 827 pvr_vm_bind_op_fini(&bind_op); 828 829 return err; 830 } 831 832 /* Static data areas are determined by firmware. */ 833 static const struct drm_pvr_static_data_area static_data_areas[] = { 834 { 835 .area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE, 836 .location_heap_id = DRM_PVR_HEAP_GENERAL, 837 .offset = 0, 838 .size = 128, 839 }, 840 { 841 .area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC, 842 .location_heap_id = DRM_PVR_HEAP_GENERAL, 843 .offset = 128, 844 .size = 1024, 845 }, 846 { 847 .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC, 848 .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA, 849 .offset = 0, 850 .size = 128, 851 }, 852 { 853 .area_usage = DRM_PVR_STATIC_DATA_AREA_EOT, 854 .location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA, 855 .offset = 128, 856 .size = 128, 857 }, 858 { 859 .area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC, 860 .location_heap_id = DRM_PVR_HEAP_USC_CODE, 861 .offset = 0, 862 .size = 128, 863 }, 864 }; 865 866 #define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE) 867 868 /* 869 * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding 870 * static data area for each heap. 871 */ 872 static const struct drm_pvr_heap pvr_heaps[] = { 873 [DRM_PVR_HEAP_GENERAL] = { 874 .base = ROGUE_GENERAL_HEAP_BASE, 875 .size = ROGUE_GENERAL_HEAP_SIZE, 876 .flags = 0, 877 .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, 878 }, 879 [DRM_PVR_HEAP_PDS_CODE_DATA] = { 880 .base = ROGUE_PDSCODEDATA_HEAP_BASE, 881 .size = ROGUE_PDSCODEDATA_HEAP_SIZE, 882 .flags = 0, 883 .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, 884 }, 885 [DRM_PVR_HEAP_USC_CODE] = { 886 .base = ROGUE_USCCODE_HEAP_BASE, 887 .size = ROGUE_USCCODE_HEAP_SIZE, 888 .flags = 0, 889 .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, 890 }, 891 [DRM_PVR_HEAP_RGNHDR] = { 892 .base = ROGUE_RGNHDR_HEAP_BASE, 893 .size = ROGUE_RGNHDR_HEAP_SIZE, 894 .flags = 0, 895 .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, 896 }, 897 [DRM_PVR_HEAP_VIS_TEST] = { 898 .base = ROGUE_VISTEST_HEAP_BASE, 899 .size = ROGUE_VISTEST_HEAP_SIZE, 900 .flags = 0, 901 .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, 902 }, 903 [DRM_PVR_HEAP_TRANSFER_FRAG] = { 904 .base = ROGUE_TRANSFER_FRAG_HEAP_BASE, 905 .size = ROGUE_TRANSFER_FRAG_HEAP_SIZE, 906 .flags = 0, 907 .page_size_log2 = PVR_DEVICE_PAGE_SHIFT, 908 }, 909 }; 910 911 int 912 pvr_static_data_areas_get(const struct pvr_device *pvr_dev, 913 struct drm_pvr_ioctl_dev_query_args *args) 914 { 915 struct drm_pvr_dev_query_static_data_areas query = {0}; 916 int err; 917 918 if (!args->pointer) { 919 args->size = sizeof(struct drm_pvr_dev_query_static_data_areas); 920 return 0; 921 } 922 923 err = PVR_UOBJ_GET(query, args->size, args->pointer); 924 if (err < 0) 925 return err; 926 927 if (!query.static_data_areas.array) { 928 query.static_data_areas.count = ARRAY_SIZE(static_data_areas); 929 query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area); 930 goto copy_out; 931 } 932 933 if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas)) 934 query.static_data_areas.count = ARRAY_SIZE(static_data_areas); 935 936 err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas); 937 if (err < 0) 938 return err; 939 940 copy_out: 941 err = PVR_UOBJ_SET(args->pointer, args->size, query); 942 if (err < 0) 943 return err; 944 945 args->size = sizeof(query); 946 return 0; 947 } 948 949 int 950 pvr_heap_info_get(const struct pvr_device *pvr_dev, 951 struct drm_pvr_ioctl_dev_query_args *args) 952 { 953 struct drm_pvr_dev_query_heap_info query = {0}; 954 u64 dest; 955 int err; 956 957 if (!args->pointer) { 958 args->size = sizeof(struct drm_pvr_dev_query_heap_info); 959 return 0; 960 } 961 962 err = PVR_UOBJ_GET(query, args->size, args->pointer); 963 if (err < 0) 964 return err; 965 966 if (!query.heaps.array) { 967 query.heaps.count = ARRAY_SIZE(pvr_heaps); 968 query.heaps.stride = sizeof(struct drm_pvr_heap); 969 goto copy_out; 970 } 971 972 if (query.heaps.count > ARRAY_SIZE(pvr_heaps)) 973 query.heaps.count = ARRAY_SIZE(pvr_heaps); 974 975 /* Region header heap is only present if BRN63142 is present. */ 976 dest = query.heaps.array; 977 for (size_t i = 0; i < query.heaps.count; i++) { 978 struct drm_pvr_heap heap = pvr_heaps[i]; 979 980 if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142)) 981 heap.size = 0; 982 983 err = PVR_UOBJ_SET(dest, query.heaps.stride, heap); 984 if (err < 0) 985 return err; 986 987 dest += query.heaps.stride; 988 } 989 990 copy_out: 991 err = PVR_UOBJ_SET(args->pointer, args->size, query); 992 if (err < 0) 993 return err; 994 995 args->size = sizeof(query); 996 return 0; 997 } 998 999 /** 1000 * pvr_heap_contains_range() - Determine if a given heap contains the specified 1001 * device-virtual address range. 1002 * @pvr_heap: Target heap. 1003 * @start: Inclusive start of the target range. 1004 * @end: Inclusive end of the target range. 1005 * 1006 * It is an error to call this function with values of @start and @end that do 1007 * not satisfy the condition @start <= @end. 1008 */ 1009 static __always_inline bool 1010 pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end) 1011 { 1012 return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size; 1013 } 1014 1015 /** 1016 * pvr_find_heap_containing() - Find a heap which contains the specified 1017 * device-virtual address range. 1018 * @pvr_dev: Target PowerVR device. 1019 * @start: Start of the target range. 1020 * @size: Size of the target range. 1021 * 1022 * Return: 1023 * * A pointer to a constant instance of struct drm_pvr_heap representing the 1024 * heap containing the entire range specified by @start and @size on 1025 * success, or 1026 * * %NULL if no such heap exists. 1027 */ 1028 const struct drm_pvr_heap * 1029 pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size) 1030 { 1031 u64 end; 1032 1033 if (check_add_overflow(start, size - 1, &end)) 1034 return NULL; 1035 1036 /* 1037 * There are no guarantees about the order of address ranges in 1038 * &pvr_heaps, so iterate over the entire array for a heap whose 1039 * range completely encompasses the given range. 1040 */ 1041 for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) { 1042 /* Filter heaps that present only with an associated quirk */ 1043 if (heap_id == DRM_PVR_HEAP_RGNHDR && 1044 !PVR_HAS_QUIRK(pvr_dev, 63142)) { 1045 continue; 1046 } 1047 1048 if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end)) 1049 return &pvr_heaps[heap_id]; 1050 } 1051 1052 return NULL; 1053 } 1054 1055 /** 1056 * pvr_vm_find_gem_object() - Look up a buffer object from a given 1057 * device-virtual address. 1058 * @vm_ctx: [IN] Target VM context. 1059 * @device_addr: [IN] Virtual device address at the start of the required 1060 * object. 1061 * @mapped_offset_out: [OUT] Pointer to location to write offset of the start 1062 * of the mapped region within the buffer object. May be 1063 * %NULL if this information is not required. 1064 * @mapped_size_out: [OUT] Pointer to location to write size of the mapped 1065 * region. May be %NULL if this information is not required. 1066 * 1067 * If successful, a reference will be taken on the buffer object. The caller 1068 * must drop the reference with pvr_gem_object_put(). 1069 * 1070 * Return: 1071 * * The PowerVR buffer object mapped at @device_addr if one exists, or 1072 * * %NULL otherwise. 1073 */ 1074 struct pvr_gem_object * 1075 pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr, 1076 u64 *mapped_offset_out, u64 *mapped_size_out) 1077 { 1078 struct pvr_gem_object *pvr_obj; 1079 struct drm_gpuva *va; 1080 1081 mutex_lock(&vm_ctx->lock); 1082 1083 va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1); 1084 if (!va) 1085 goto err_unlock; 1086 1087 pvr_obj = gem_to_pvr_gem(va->gem.obj); 1088 pvr_gem_object_get(pvr_obj); 1089 1090 if (mapped_offset_out) 1091 *mapped_offset_out = va->gem.offset; 1092 if (mapped_size_out) 1093 *mapped_size_out = va->va.range; 1094 1095 mutex_unlock(&vm_ctx->lock); 1096 1097 return pvr_obj; 1098 1099 err_unlock: 1100 mutex_unlock(&vm_ctx->lock); 1101 1102 return NULL; 1103 } 1104 1105 /** 1106 * pvr_vm_get_fw_mem_context: Get object representing firmware memory context 1107 * @vm_ctx: Target VM context. 1108 * 1109 * Returns: 1110 * * FW object representing firmware memory context, or 1111 * * %NULL if this VM context does not have a firmware memory context. 1112 */ 1113 struct pvr_fw_object * 1114 pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx) 1115 { 1116 return vm_ctx->fw_mem_ctx_obj; 1117 } 1118