// SPDX-License-Identifier: GPL-2.0-only OR MIT
/* Copyright (c) 2023 Imagination Technologies Ltd. */

#include "pvr_vm.h"

#include "pvr_device.h"
#include "pvr_drv.h"
#include "pvr_gem.h"
#include "pvr_mmu.h"
#include "pvr_rogue_fwif.h"
#include "pvr_rogue_heap_config.h"

#include <drm/drm_exec.h>
#include <drm/drm_gem.h>
#include <drm/drm_gpuvm.h>

#include <linux/container_of.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/gfp_types.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/stddef.h>

/**
 * DOC: Memory context
 *
 * This is the "top level" datatype in the VM code. It's exposed in the public
 * API as an opaque handle.
 */

/**
 * struct pvr_vm_context - Context type used to represent a single VM.
 */
struct pvr_vm_context {
	/**
	 * @pvr_dev: The PowerVR device to which this context is bound.
	 * This binding is immutable for the life of the context.
	 */
	struct pvr_device *pvr_dev;

	/** @mmu_ctx: The context for binding to physical memory. */
	struct pvr_mmu_context *mmu_ctx;

	/** @gpuvm_mgr: GPUVM object associated with this context. */
	struct drm_gpuvm gpuvm_mgr;

	/** @lock: Global lock on this VM. */
	struct mutex lock;

	/**
	 * @fw_mem_ctx_obj: Firmware object representing firmware memory
	 * context.
	 */
	struct pvr_fw_object *fw_mem_ctx_obj;

	/** @ref_count: Reference count of object. */
	struct kref ref_count;

	/**
	 * @dummy_gem: GEM object to enable VM reservation. All private BOs
	 * should use the @dummy_gem.resv and not their own _resv field.
	 */
	struct drm_gem_object dummy_gem;
};

static inline
struct pvr_vm_context *to_pvr_vm_context(struct drm_gpuvm *gpuvm)
{
	return container_of(gpuvm, struct pvr_vm_context, gpuvm_mgr);
}

struct pvr_vm_context *pvr_vm_context_get(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		kref_get(&vm_ctx->ref_count);

	return vm_ctx;
}

/**
 * pvr_vm_get_page_table_root_addr() - Get the DMA address of the root of the
 *                                     page table structure behind a VM context.
 * @vm_ctx: Target VM context.
 */
dma_addr_t pvr_vm_get_page_table_root_addr(struct pvr_vm_context *vm_ctx)
{
	return pvr_mmu_get_root_table_dma_addr(vm_ctx->mmu_ctx);
}

/**
 * pvr_vm_get_dma_resv() - Expose the dma_resv owned by the VM context.
 * @vm_ctx: Target VM context.
 *
 * This is used to allow private BOs to share a dma_resv for faster fence
 * updates.
 *
 * Returns: The dma_resv pointer.
 */
struct dma_resv *pvr_vm_get_dma_resv(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->dummy_gem.resv;
}

/**
 * DOC: Memory mappings
 */

/**
 * struct pvr_vm_gpuva - Wrapper type representing a single VM mapping.
 */
struct pvr_vm_gpuva {
	/** @base: The wrapped drm_gpuva object. */
	struct drm_gpuva base;
};

enum pvr_vm_bind_type {
	PVR_VM_BIND_TYPE_MAP,
	PVR_VM_BIND_TYPE_UNMAP,
};

/**
 * struct pvr_vm_bind_op - Context of a map/unmap operation.
 */
struct pvr_vm_bind_op {
	/** @type: Map or unmap. */
	enum pvr_vm_bind_type type;

	/** @pvr_obj: Object associated with mapping (map only). */
	struct pvr_gem_object *pvr_obj;

	/**
	 * @vm_ctx: VM context where the mapping will be created or destroyed.
	 */
	struct pvr_vm_context *vm_ctx;

	/** @mmu_op_ctx: MMU op context. */
	struct pvr_mmu_op_context *mmu_op_ctx;

	/** @gpuvm_bo: Prealloced wrapped BO for attaching to the gpuvm. */
	struct drm_gpuvm_bo *gpuvm_bo;

	/**
	 * @new_va: Prealloced VA mapping object (init in callback).
	 * Used when creating a mapping.
	 */
	struct pvr_vm_gpuva *new_va;

	/**
	 * @prev_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the beginning into a new mapping.
	 */
	struct pvr_vm_gpuva *prev_va;

	/**
	 * @next_va: Prealloced VA mapping object (init in callback).
	 * Used when a mapping or unmapping operation overlaps an existing
	 * mapping and splits away the end into a new mapping.
	 */
	struct pvr_vm_gpuva *next_va;

	/** @offset: Offset into @pvr_obj to begin mapping from. */
	u64 offset;

	/** @device_addr: Device-virtual address at the start of the mapping. */
	u64 device_addr;

	/** @size: Size of the desired mapping. */
	u64 size;
};

/**
 * pvr_vm_bind_op_exec() - Execute a single bind op.
 * @bind_op: Bind op context.
 *
 * Returns:
 *  * 0 on success,
 *  * Any error code returned by drm_gpuvm_sm_map(), drm_gpuvm_sm_unmap(), or
 *    a callback function.
 */
static int pvr_vm_bind_op_exec(struct pvr_vm_bind_op *bind_op)
{
	switch (bind_op->type) {
	case PVR_VM_BIND_TYPE_MAP:
		return drm_gpuvm_sm_map(&bind_op->vm_ctx->gpuvm_mgr,
					bind_op, bind_op->device_addr,
					bind_op->size,
					gem_from_pvr_gem(bind_op->pvr_obj),
					bind_op->offset);

	case PVR_VM_BIND_TYPE_UNMAP:
		return drm_gpuvm_sm_unmap(&bind_op->vm_ctx->gpuvm_mgr,
					  bind_op, bind_op->device_addr,
					  bind_op->size);
	}

	/*
	 * This shouldn't happen unless something went wrong
	 * in drm_sched.
	 */
	WARN_ON(1);
	return -EINVAL;
}

static void pvr_vm_bind_op_fini(struct pvr_vm_bind_op *bind_op)
{
	drm_gpuvm_bo_put(bind_op->gpuvm_bo);

	kfree(bind_op->new_va);
	kfree(bind_op->prev_va);
	kfree(bind_op->next_va);

	if (bind_op->pvr_obj)
		pvr_gem_object_put(bind_op->pvr_obj);

	if (bind_op->mmu_op_ctx)
		pvr_mmu_op_context_destroy(bind_op->mmu_op_ctx);
}
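/*
 * A bind op always moves through the same sequence: it is initialised with
 * pvr_vm_bind_op_map_init() or pvr_vm_bind_op_unmap_init() below, executed
 * via pvr_vm_bind_op_exec() with the relevant reservations held, and then
 * released with pvr_vm_bind_op_fini(), which tolerates partially-initialised
 * ops. As a rough sketch (error handling elided; see pvr_vm_map() and
 * pvr_vm_unmap() later in this file for the real sequence):
 *
 *	struct pvr_vm_bind_op bind_op = {0};
 *
 *	pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj, offset,
 *				device_addr, size);
 *	drm_gpuvm_exec_lock(&vm_exec);
 *	pvr_vm_bind_op_exec(&bind_op);
 *	drm_gpuvm_exec_unlock(&vm_exec);
 *	pvr_vm_bind_op_fini(&bind_op);
 */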
static int
pvr_vm_bind_op_map_init(struct pvr_vm_bind_op *bind_op,
			struct pvr_vm_context *vm_ctx,
			struct pvr_gem_object *pvr_obj, u64 offset,
			u64 device_addr, u64 size)
{
	struct drm_gem_object *obj = gem_from_pvr_gem(pvr_obj);
	const bool is_user = vm_ctx != vm_ctx->pvr_dev->kernel_vm_ctx;
	const u64 pvr_obj_size = pvr_gem_object_size(pvr_obj);
	struct sg_table *sgt;
	u64 offset_plus_size;
	int err;

	if (check_add_overflow(offset, size, &offset_plus_size))
		return -EINVAL;

	if (is_user &&
	    !pvr_find_heap_containing(vm_ctx->pvr_dev, device_addr, size)) {
		return -EINVAL;
	}

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size) ||
	    offset & ~PAGE_MASK || size & ~PAGE_MASK ||
	    offset >= pvr_obj_size || offset_plus_size > pvr_obj_size)
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_MAP;

	dma_resv_lock(obj->resv, NULL);
	bind_op->gpuvm_bo = drm_gpuvm_bo_obtain(&vm_ctx->gpuvm_mgr, obj);
	dma_resv_unlock(obj->resv);
	if (IS_ERR(bind_op->gpuvm_bo))
		return PTR_ERR(bind_op->gpuvm_bo);

	bind_op->new_va = kzalloc(sizeof(*bind_op->new_va), GFP_KERNEL);
	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->new_va || !bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	/* Pin pages so they're ready for use. */
	sgt = pvr_gem_object_get_pages_sgt(pvr_obj);
	err = PTR_ERR_OR_ZERO(sgt);
	if (err)
		goto err_bind_op_fini;

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, sgt, offset, size);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->pvr_obj = pvr_obj;
	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;
	bind_op->offset = offset;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

static int
pvr_vm_bind_op_unmap_init(struct pvr_vm_bind_op *bind_op,
			  struct pvr_vm_context *vm_ctx, u64 device_addr,
			  u64 size)
{
	int err;

	if (!pvr_device_addr_and_size_are_valid(vm_ctx, device_addr, size))
		return -EINVAL;

	bind_op->type = PVR_VM_BIND_TYPE_UNMAP;

	bind_op->prev_va = kzalloc(sizeof(*bind_op->prev_va), GFP_KERNEL);
	bind_op->next_va = kzalloc(sizeof(*bind_op->next_va), GFP_KERNEL);
	if (!bind_op->prev_va || !bind_op->next_va) {
		err = -ENOMEM;
		goto err_bind_op_fini;
	}

	bind_op->mmu_op_ctx =
		pvr_mmu_op_context_create(vm_ctx->mmu_ctx, NULL, 0, 0);
	err = PTR_ERR_OR_ZERO(bind_op->mmu_op_ctx);
	if (err) {
		bind_op->mmu_op_ctx = NULL;
		goto err_bind_op_fini;
	}

	bind_op->vm_ctx = vm_ctx;
	bind_op->device_addr = device_addr;
	bind_op->size = size;

	return 0;

err_bind_op_fini:
	pvr_vm_bind_op_fini(bind_op);

	return err;
}

/**
 * pvr_vm_gpuva_map() - Insert a mapping into a memory context.
 * @op: gpuva op containing the map details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_map following a successful mapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_map().
 */
static int
pvr_vm_gpuva_map(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_gem_object *pvr_gem = gem_to_pvr_gem(op->map.gem.obj);
	struct pvr_vm_bind_op *ctx = op_ctx;
	int err;

	if ((op->map.gem.offset | op->map.va.range) & ~PVR_DEVICE_PAGE_MASK)
		return -EINVAL;

	err = pvr_mmu_map(ctx->mmu_op_ctx, op->map.va.range, pvr_gem->flags,
			  op->map.va.addr);
	if (err)
		return err;

	drm_gpuva_map(&ctx->vm_ctx->gpuvm_mgr, &ctx->new_va->base, &op->map);
	drm_gpuva_link(&ctx->new_va->base, ctx->gpuvm_bo);
	ctx->new_va = NULL;

	return 0;
}

/**
 * pvr_vm_gpuva_unmap() - Remove a mapping from a memory context.
 * @op: gpuva op containing the unmap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by drm_gpuvm_sm_unmap following a successful unmapping while
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_unmap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;

	int err = pvr_mmu_unmap(ctx->mmu_op_ctx, op->unmap.va->va.addr,
				op->unmap.va->va.range);

	if (err)
		return err;

	drm_gpuva_unmap(&op->unmap);
	drm_gpuva_unlink(op->unmap.va);

	return 0;
}
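/*
 * An illustration of when the remap step below fires (not specific to this
 * driver, just the generic drm_gpuvm split semantics): if device pages
 * [A, A + 3) are covered by a single drm_gpuva and the caller unmaps only
 * page A + 1, drm_gpuvm emits a remap op whose unmap region covers the old
 * mapping, with op->remap.prev describing the surviving front portion
 * (page A) and op->remap.next the surviving tail (page A + 2).
 * pvr_vm_gpuva_remap() then tears down the page table entries for the
 * genuinely unmapped middle and re-links the retained pieces using the
 * preallocated prev_va and next_va objects.
 */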
/**
 * pvr_vm_gpuva_remap() - Remap a mapping within a memory context.
 * @op: gpuva op containing the remap details.
 * @op_ctx: Operation context.
 *
 * Context: Called by either drm_gpuvm_sm_map or drm_gpuvm_sm_unmap when a
 * mapping or unmapping operation causes a region to be split. The
 * @op_ctx.vm_ctx mutex is held.
 *
 * Return:
 *  * 0 on success, or
 *  * Any error returned by pvr_mmu_unmap().
 */
static int
pvr_vm_gpuva_remap(struct drm_gpuva_op *op, void *op_ctx)
{
	struct pvr_vm_bind_op *ctx = op_ctx;
	u64 va_start = 0, va_range = 0;
	int err;

	drm_gpuva_op_remap_to_unmap_range(&op->remap, &va_start, &va_range);
	err = pvr_mmu_unmap(ctx->mmu_op_ctx, va_start, va_range);
	if (err)
		return err;

	/* No actual remap required: the page table tree depth is fixed to 3,
	 * and we use 4k page table entries only for now.
	 */
	drm_gpuva_remap(&ctx->prev_va->base, &ctx->next_va->base, &op->remap);

	if (op->remap.prev) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->prev_va->base.gem.obj));
		drm_gpuva_link(&ctx->prev_va->base, ctx->gpuvm_bo);
		ctx->prev_va = NULL;
	}

	if (op->remap.next) {
		pvr_gem_object_get(gem_to_pvr_gem(ctx->next_va->base.gem.obj));
		drm_gpuva_link(&ctx->next_va->base, ctx->gpuvm_bo);
		ctx->next_va = NULL;
	}

	drm_gpuva_unlink(op->remap.unmap->va);

	return 0;
}

/*
 * Public API
 *
 * For an overview of these functions, see *DOC: Public API* in "pvr_vm.h".
 */

/**
 * pvr_device_addr_is_valid() - Tests whether a device-virtual address
 *                              is valid.
 * @device_addr: Virtual device address to test.
 *
 * Return:
 *  * %true if @device_addr is within the valid range for a device page
 *    table and is aligned to the device page size, or
 *  * %false otherwise.
 */
bool
pvr_device_addr_is_valid(u64 device_addr)
{
	return (device_addr & ~PVR_PAGE_TABLE_ADDR_MASK) == 0 &&
	       (device_addr & ~PVR_DEVICE_PAGE_MASK) == 0;
}

/**
 * pvr_device_addr_and_size_are_valid() - Tests whether a device-virtual
 * address and associated size are both valid.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address to test.
 * @size: Size of the range based at @device_addr to test.
 *
 * Calling pvr_device_addr_is_valid() twice (once on @device_addr, and again
 * on @device_addr + @size) to verify a device-virtual address range initially
 * seems intuitive, but it produces a false-negative when the address range
 * is right at the end of device-virtual address space.
 *
 * This function catches that corner case, as well as checking that
 * @size is non-zero.
 *
 * Return:
 *  * %true if @device_addr is device page aligned; @size is device page
 *    aligned; the range specified by @device_addr and @size is within the
 *    bounds of the device-virtual address space, and @size is non-zero, or
 *  * %false otherwise.
 */
bool
pvr_device_addr_and_size_are_valid(struct pvr_vm_context *vm_ctx,
				   u64 device_addr, u64 size)
{
	return pvr_device_addr_is_valid(device_addr) &&
	       drm_gpuvm_range_valid(&vm_ctx->gpuvm_mgr, device_addr, size) &&
	       size != 0 && (size & ~PVR_DEVICE_PAGE_MASK) == 0 &&
	       (device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE);
}
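/*
 * Worked example of the corner case described above (illustrative, and
 * assuming PVR_PAGE_TABLE_ADDR_SPACE_SIZE is one past the highest address
 * covered by PVR_PAGE_TABLE_ADDR_MASK): take a device-page-aligned range
 * whose last byte is the final byte of device-virtual address space, i.e.
 * device_addr + size == PVR_PAGE_TABLE_ADDR_SPACE_SIZE. The range itself is
 * usable, but pvr_device_addr_is_valid(device_addr + size) would return
 * %false because the sum lies one past the last addressable byte and so has
 * bits set outside PVR_PAGE_TABLE_ADDR_MASK. Hence the explicit
 * "device_addr + size <= PVR_PAGE_TABLE_ADDR_SPACE_SIZE" comparison above
 * instead of a second call to pvr_device_addr_is_valid().
 */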
static void pvr_gpuvm_free(struct drm_gpuvm *gpuvm)
{
	kfree(to_pvr_vm_context(gpuvm));
}

static const struct drm_gpuvm_ops pvr_vm_gpuva_ops = {
	.vm_free = pvr_gpuvm_free,
	.sm_step_map = pvr_vm_gpuva_map,
	.sm_step_remap = pvr_vm_gpuva_remap,
	.sm_step_unmap = pvr_vm_gpuva_unmap,
};

static void
fw_mem_context_init(void *cpu_ptr, void *priv)
{
	struct rogue_fwif_fwmemcontext *fw_mem_ctx = cpu_ptr;
	struct pvr_vm_context *vm_ctx = priv;

	fw_mem_ctx->pc_dev_paddr = pvr_vm_get_page_table_root_addr(vm_ctx);
	fw_mem_ctx->page_cat_base_reg_set = ROGUE_FW_BIF_INVALID_PCSET;
}

/**
 * pvr_vm_create_context() - Create a new VM context.
 * @pvr_dev: Target PowerVR device.
 * @is_userspace_context: %true if this context is for userspace. This will
 *                        create a firmware memory context for the VM context
 *                        and disable warnings when tearing down mappings.
 *
 * Return:
 *  * A handle to the newly-minted VM context on success,
 *  * -%EINVAL if the feature "virtual address space bits" on @pvr_dev is
 *    missing or has an unsupported value,
 *  * -%ENOMEM if allocation of the structure behind the opaque handle fails,
 *    or
 *  * Any error encountered while setting up internal structures.
 */
struct pvr_vm_context *
pvr_vm_create_context(struct pvr_device *pvr_dev, bool is_userspace_context)
{
	struct drm_device *drm_dev = from_pvr_device(pvr_dev);

	struct pvr_vm_context *vm_ctx;
	u16 device_addr_bits;

	int err;

	err = PVR_FEATURE_VALUE(pvr_dev, virtual_address_space_bits,
				&device_addr_bits);
	if (err) {
		drm_err(drm_dev,
			"Failed to get device virtual address space bits\n");
		return ERR_PTR(err);
	}

	if (device_addr_bits != PVR_PAGE_TABLE_ADDR_BITS) {
		drm_err(drm_dev,
			"Device has unsupported virtual address space size\n");
		return ERR_PTR(-EINVAL);
	}

	vm_ctx = kzalloc(sizeof(*vm_ctx), GFP_KERNEL);
	if (!vm_ctx)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&pvr_dev->base, &vm_ctx->dummy_gem, 0);

	vm_ctx->pvr_dev = pvr_dev;
	kref_init(&vm_ctx->ref_count);
	mutex_init(&vm_ctx->lock);

	drm_gpuvm_init(&vm_ctx->gpuvm_mgr,
		       is_userspace_context ? "PowerVR-user-VM" : "PowerVR-FW-VM",
		       0, &pvr_dev->base, &vm_ctx->dummy_gem,
		       0, 1ULL << device_addr_bits, 0, 0, &pvr_vm_gpuva_ops);

	vm_ctx->mmu_ctx = pvr_mmu_context_create(pvr_dev);
	err = PTR_ERR_OR_ZERO(vm_ctx->mmu_ctx);
	if (err) {
		vm_ctx->mmu_ctx = NULL;
		goto err_put_ctx;
	}

	if (is_userspace_context) {
		err = pvr_fw_object_create(pvr_dev, sizeof(struct rogue_fwif_fwmemcontext),
					   PVR_BO_FW_FLAGS_DEVICE_UNCACHED,
					   fw_mem_context_init, vm_ctx, &vm_ctx->fw_mem_ctx_obj);

		if (err)
			goto err_page_table_destroy;
	}

	return vm_ctx;

err_page_table_destroy:
	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);

err_put_ctx:
	pvr_vm_context_put(vm_ctx);

	return ERR_PTR(err);
}
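/*
 * Callers of pvr_vm_create_context() are expected to test the returned
 * handle with IS_ERR() rather than against NULL. Illustrative sketch only:
 *
 *	vm_ctx = pvr_vm_create_context(pvr_dev, true);
 *	if (IS_ERR(vm_ctx))
 *		return PTR_ERR(vm_ctx);
 */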
/**
 * pvr_vm_context_release() - Teardown a VM context.
 * @ref_count: Pointer to reference counter of the VM context.
 *
 * This function ensures that no mappings are left dangling by unmapping them
 * all in order of ascending device-virtual address.
 */
static void
pvr_vm_context_release(struct kref *ref_count)
{
	struct pvr_vm_context *vm_ctx =
		container_of(ref_count, struct pvr_vm_context, ref_count);

	if (vm_ctx->fw_mem_ctx_obj)
		pvr_fw_object_destroy(vm_ctx->fw_mem_ctx_obj);

	WARN_ON(pvr_vm_unmap(vm_ctx, vm_ctx->gpuvm_mgr.mm_start,
			     vm_ctx->gpuvm_mgr.mm_range));

	pvr_mmu_context_destroy(vm_ctx->mmu_ctx);
	drm_gem_private_object_fini(&vm_ctx->dummy_gem);
	mutex_destroy(&vm_ctx->lock);

	drm_gpuvm_put(&vm_ctx->gpuvm_mgr);
}

/**
 * pvr_vm_context_lookup() - Look up VM context from handle
 * @pvr_file: Pointer to pvr_file structure.
 * @handle: Object handle.
 *
 * Takes reference on VM context object. Call pvr_vm_context_put() to release.
 *
 * Returns:
 *  * The requested object on success, or
 *  * %NULL on failure (object does not exist in list, or is not a VM context)
 */
struct pvr_vm_context *
pvr_vm_context_lookup(struct pvr_file *pvr_file, u32 handle)
{
	struct pvr_vm_context *vm_ctx;

	xa_lock(&pvr_file->vm_ctx_handles);
	vm_ctx = xa_load(&pvr_file->vm_ctx_handles, handle);
	if (vm_ctx)
		kref_get(&vm_ctx->ref_count);

	xa_unlock(&pvr_file->vm_ctx_handles);

	return vm_ctx;
}

/**
 * pvr_vm_context_put() - Release a reference on a VM context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 *  * %true if the VM context was destroyed, or
 *  * %false if there are any references still remaining.
 */
bool
pvr_vm_context_put(struct pvr_vm_context *vm_ctx)
{
	if (vm_ctx)
		return kref_put(&vm_ctx->ref_count, pvr_vm_context_release);

	return true;
}

/**
 * pvr_destroy_vm_contexts_for_file: Destroy any VM contexts associated with the
 * given file.
 * @pvr_file: Pointer to pvr_file structure.
 *
 * Removes all vm_contexts associated with @pvr_file from the device VM context
 * list and drops initial references. vm_contexts will then be destroyed once
 * all outstanding references are dropped.
 */
void pvr_destroy_vm_contexts_for_file(struct pvr_file *pvr_file)
{
	struct pvr_vm_context *vm_ctx;
	unsigned long handle;

	xa_for_each(&pvr_file->vm_ctx_handles, handle, vm_ctx) {
		/* vm_ctx is not used here because that would create a race with xa_erase */
		pvr_vm_context_put(xa_erase(&pvr_file->vm_ctx_handles, handle));
	}
}
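/*
 * A typical consumer pairs pvr_vm_context_lookup() with pvr_vm_context_put()
 * once it is done with the context. Illustrative sketch only (the ioctl
 * argument name is hypothetical):
 *
 *	vm_ctx = pvr_vm_context_lookup(pvr_file, args->vm_context_handle);
 *	if (!vm_ctx)
 *		return -EINVAL;
 *
 *	... use vm_ctx ...
 *
 *	pvr_vm_context_put(vm_ctx);
 */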
static int
pvr_vm_lock_extra(struct drm_gpuvm_exec *vm_exec)
{
	struct pvr_vm_bind_op *bind_op = vm_exec->extra.priv;
	struct pvr_gem_object *pvr_obj = bind_op->pvr_obj;

	/* Unmap operations don't have an object to lock. */
	if (!pvr_obj)
		return 0;

	/* Acquire lock on the GEM being mapped. */
	return drm_exec_lock_obj(&vm_exec->exec, gem_from_pvr_gem(pvr_obj));
}

/**
 * pvr_vm_map() - Map a section of physical memory into a section of
 * device-virtual memory.
 * @vm_ctx: Target VM context.
 * @pvr_obj: Target PowerVR memory object.
 * @pvr_obj_offset: Offset into @pvr_obj to map from.
 * @device_addr: Virtual device address at the start of the requested mapping.
 * @size: Size of the requested mapping.
 *
 * No handle is returned to represent the mapping. Instead, callers should
 * remember @device_addr and use that as a handle.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *    address; the region specified by @pvr_obj_offset and @size does not fall
 *    entirely within @pvr_obj, or any part of the specified region of @pvr_obj
 *    is not device-virtual page-aligned,
 *  * Any error encountered while performing internal operations required to
 *    create the mapping (returned from pvr_vm_gpuva_map or
 *    pvr_vm_gpuva_remap).
 */
int
pvr_vm_map(struct pvr_vm_context *vm_ctx, struct pvr_gem_object *pvr_obj,
	   u64 pvr_obj_offset, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_map_init(&bind_op, vm_ctx, pvr_obj,
					  pvr_obj_offset, device_addr,
					  size);

	if (err)
		return err;

	pvr_gem_object_get(pvr_obj);

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}

/**
 * pvr_vm_unmap() - Unmap an already mapped section of device-virtual memory.
 * @vm_ctx: Target VM context.
 * @device_addr: Virtual device address at the start of the target mapping.
 * @size: Size of the target mapping.
 *
 * Return:
 *  * 0 on success,
 *  * -%EINVAL if @device_addr is not a valid page-aligned device-virtual
 *    address,
 *  * Any error encountered while performing internal operations required to
 *    destroy the mapping (returned from pvr_vm_gpuva_unmap or
 *    pvr_vm_gpuva_remap).
 */
int
pvr_vm_unmap(struct pvr_vm_context *vm_ctx, u64 device_addr, u64 size)
{
	struct pvr_vm_bind_op bind_op = {0};
	struct drm_gpuvm_exec vm_exec = {
		.vm = &vm_ctx->gpuvm_mgr,
		.flags = DRM_EXEC_INTERRUPTIBLE_WAIT |
			 DRM_EXEC_IGNORE_DUPLICATES,
		.extra = {
			.fn = pvr_vm_lock_extra,
			.priv = &bind_op,
		},
	};

	int err = pvr_vm_bind_op_unmap_init(&bind_op, vm_ctx, device_addr,
					    size);
	if (err)
		return err;

	err = drm_gpuvm_exec_lock(&vm_exec);
	if (err)
		goto err_cleanup;

	err = pvr_vm_bind_op_exec(&bind_op);

	drm_gpuvm_exec_unlock(&vm_exec);

err_cleanup:
	pvr_vm_bind_op_fini(&bind_op);

	return err;
}
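/*
 * Putting the two calls above together, a caller that wants a buffer visible
 * to the GPU at a chosen device-virtual address does something like the
 * following (illustrative only, error handling elided; the device address is
 * the only "handle" to the mapping):
 *
 *	err = pvr_vm_map(vm_ctx, pvr_obj, 0, device_addr, size);
 *	...
 *	err = pvr_vm_unmap(vm_ctx, device_addr, size);
 */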
/* Static data areas are determined by firmware. */
static const struct drm_pvr_static_data_area static_data_areas[] = {
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_FENCE,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_YUV_CSC,
		.location_heap_id = DRM_PVR_HEAP_GENERAL,
		.offset = 128,
		.size = 1024,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 0,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_EOT,
		.location_heap_id = DRM_PVR_HEAP_PDS_CODE_DATA,
		.offset = 128,
		.size = 128,
	},
	{
		.area_usage = DRM_PVR_STATIC_DATA_AREA_VDM_SYNC,
		.location_heap_id = DRM_PVR_HEAP_USC_CODE,
		.offset = 0,
		.size = 128,
	},
};

#define GET_RESERVED_SIZE(last_offset, last_size) round_up((last_offset) + (last_size), PAGE_SIZE)

/*
 * The values given to GET_RESERVED_SIZE() are taken from the last entry in the corresponding
 * static data area for each heap.
 */
static const struct drm_pvr_heap pvr_heaps[] = {
	[DRM_PVR_HEAP_GENERAL] = {
		.base = ROGUE_GENERAL_HEAP_BASE,
		.size = ROGUE_GENERAL_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_PDS_CODE_DATA] = {
		.base = ROGUE_PDSCODEDATA_HEAP_BASE,
		.size = ROGUE_PDSCODEDATA_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_USC_CODE] = {
		.base = ROGUE_USCCODE_HEAP_BASE,
		.size = ROGUE_USCCODE_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_RGNHDR] = {
		.base = ROGUE_RGNHDR_HEAP_BASE,
		.size = ROGUE_RGNHDR_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_VIS_TEST] = {
		.base = ROGUE_VISTEST_HEAP_BASE,
		.size = ROGUE_VISTEST_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
	[DRM_PVR_HEAP_TRANSFER_FRAG] = {
		.base = ROGUE_TRANSFER_FRAG_HEAP_BASE,
		.size = ROGUE_TRANSFER_FRAG_HEAP_SIZE,
		.flags = 0,
		.page_size_log2 = PVR_DEVICE_PAGE_SHIFT,
	},
};

int
pvr_static_data_areas_get(const struct pvr_device *pvr_dev,
			  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_static_data_areas query = {0};
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_static_data_areas);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.static_data_areas.array) {
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);
		query.static_data_areas.stride = sizeof(struct drm_pvr_static_data_area);
		goto copy_out;
	}

	if (query.static_data_areas.count > ARRAY_SIZE(static_data_areas))
		query.static_data_areas.count = ARRAY_SIZE(static_data_areas);

	err = PVR_UOBJ_SET_ARRAY(&query.static_data_areas, static_data_areas);
	if (err < 0)
		return err;

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}
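/*
 * Both this query and pvr_heap_info_get() below follow the same multi-step
 * pattern, driven entirely by what userspace passes in (a summary of the code
 * above, not an additional contract): a call with a NULL @args->pointer only
 * reports the expected struct size; a call with a NULL array pointer reports
 * the element count and stride; and a call with an array supplied copies out
 * at most that many entries. Userspace is therefore expected to query once to
 * size its buffer, then query again to fill it.
 */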
int
pvr_heap_info_get(const struct pvr_device *pvr_dev,
		  struct drm_pvr_ioctl_dev_query_args *args)
{
	struct drm_pvr_dev_query_heap_info query = {0};
	u64 dest;
	int err;

	if (!args->pointer) {
		args->size = sizeof(struct drm_pvr_dev_query_heap_info);
		return 0;
	}

	err = PVR_UOBJ_GET(query, args->size, args->pointer);
	if (err < 0)
		return err;

	if (!query.heaps.array) {
		query.heaps.count = ARRAY_SIZE(pvr_heaps);
		query.heaps.stride = sizeof(struct drm_pvr_heap);
		goto copy_out;
	}

	if (query.heaps.count > ARRAY_SIZE(pvr_heaps))
		query.heaps.count = ARRAY_SIZE(pvr_heaps);

	/* Region header heap is only present if BRN63142 is present. */
	dest = query.heaps.array;
	for (size_t i = 0; i < query.heaps.count; i++) {
		struct drm_pvr_heap heap = pvr_heaps[i];

		if (i == DRM_PVR_HEAP_RGNHDR && !PVR_HAS_QUIRK(pvr_dev, 63142))
			heap.size = 0;

		err = PVR_UOBJ_SET(dest, query.heaps.stride, heap);
		if (err < 0)
			return err;

		dest += query.heaps.stride;
	}

copy_out:
	err = PVR_UOBJ_SET(args->pointer, args->size, query);
	if (err < 0)
		return err;

	args->size = sizeof(query);
	return 0;
}

/**
 * pvr_heap_contains_range() - Determine if a given heap contains the specified
 *                             device-virtual address range.
 * @pvr_heap: Target heap.
 * @start: Inclusive start of the target range.
 * @end: Inclusive end of the target range.
 *
 * It is an error to call this function with values of @start and @end that do
 * not satisfy the condition @start <= @end.
 */
static __always_inline bool
pvr_heap_contains_range(const struct drm_pvr_heap *pvr_heap, u64 start, u64 end)
{
	return pvr_heap->base <= start && end < pvr_heap->base + pvr_heap->size;
}

/**
 * pvr_find_heap_containing() - Find a heap which contains the specified
 * device-virtual address range.
 * @pvr_dev: Target PowerVR device.
 * @start: Start of the target range.
 * @size: Size of the target range.
 *
 * Return:
 *  * A pointer to a constant instance of struct drm_pvr_heap representing the
 *    heap containing the entire range specified by @start and @size on
 *    success, or
 *  * %NULL if no such heap exists.
 */
const struct drm_pvr_heap *
pvr_find_heap_containing(struct pvr_device *pvr_dev, u64 start, u64 size)
{
	u64 end;

	if (check_add_overflow(start, size - 1, &end))
		return NULL;

	/*
	 * There are no guarantees about the order of address ranges in
	 * &pvr_heaps, so iterate over the entire array for a heap whose
	 * range completely encompasses the given range.
	 */
	for (u32 heap_id = 0; heap_id < ARRAY_SIZE(pvr_heaps); heap_id++) {
		/* Filter heaps that are only present with an associated quirk. */
		if (heap_id == DRM_PVR_HEAP_RGNHDR &&
		    !PVR_HAS_QUIRK(pvr_dev, 63142)) {
			continue;
		}

		if (pvr_heap_contains_range(&pvr_heaps[heap_id], start, end))
			return &pvr_heaps[heap_id];
	}

	return NULL;
}

/**
 * pvr_vm_find_gem_object() - Look up a buffer object from a given
 * device-virtual address.
 * @vm_ctx: [IN] Target VM context.
 * @device_addr: [IN] Virtual device address at the start of the required
 *               object.
 * @mapped_offset_out: [OUT] Pointer to location to write offset of the start
 *                     of the mapped region within the buffer object. May be
 *                     %NULL if this information is not required.
 * @mapped_size_out: [OUT] Pointer to location to write size of the mapped
 *                   region. May be %NULL if this information is not required.
 *
 * If successful, a reference will be taken on the buffer object. The caller
 * must drop the reference with pvr_gem_object_put().
 *
 * Return:
 *  * The PowerVR buffer object mapped at @device_addr if one exists, or
 *  * %NULL otherwise.
 */
struct pvr_gem_object *
pvr_vm_find_gem_object(struct pvr_vm_context *vm_ctx, u64 device_addr,
		       u64 *mapped_offset_out, u64 *mapped_size_out)
{
	struct pvr_gem_object *pvr_obj;
	struct drm_gpuva *va;

	mutex_lock(&vm_ctx->lock);

	va = drm_gpuva_find_first(&vm_ctx->gpuvm_mgr, device_addr, 1);
	if (!va)
		goto err_unlock;

	pvr_obj = gem_to_pvr_gem(va->gem.obj);
	pvr_gem_object_get(pvr_obj);

	if (mapped_offset_out)
		*mapped_offset_out = va->gem.offset;
	if (mapped_size_out)
		*mapped_size_out = va->va.range;

	mutex_unlock(&vm_ctx->lock);

	return pvr_obj;

err_unlock:
	mutex_unlock(&vm_ctx->lock);

	return NULL;
}
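/*
 * Since pvr_vm_find_gem_object() takes a reference on success, every caller
 * must balance it. Illustrative sketch only (the chosen error code is
 * hypothetical):
 *
 *	pvr_obj = pvr_vm_find_gem_object(vm_ctx, device_addr, &offset, &size);
 *	if (!pvr_obj)
 *		return -ENOENT;
 *
 *	... use pvr_obj, offset and size ...
 *
 *	pvr_gem_object_put(pvr_obj);
 */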
/**
 * pvr_vm_get_fw_mem_context: Get object representing firmware memory context
 * @vm_ctx: Target VM context.
 *
 * Returns:
 *  * FW object representing firmware memory context, or
 *  * %NULL if this VM context does not have a firmware memory context.
 */
struct pvr_fw_object *
pvr_vm_get_fw_mem_context(struct pvr_vm_context *vm_ctx)
{
	return vm_ctx->fw_mem_ctx_obj;
}