// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panthor_device.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

#define MAX_AS_SLOTS	32

struct panthor_vm;

/**
 * struct panthor_as_slot - Address space slot
 */
struct panthor_as_slot {
	/** @vm: VM bound to this slot. NULL if no VM is bound. */
	struct panthor_vm *vm;
};

/**
 * struct panthor_mmu - MMU related data
 */
struct panthor_mmu {
	/** @irq: The MMU irq. */
	struct panthor_irq irq;

	/**
	 * @as: Address space related fields.
	 *
	 * The GPU has a limited number of address space (AS) slots, forcing
	 * us to re-assign slots on demand.
	 */
	struct {
		/** @as.slots_lock: Lock protecting access to all other AS fields. */
		struct mutex slots_lock;

		/** @as.alloc_mask: Bitmask encoding the allocated slots. */
		unsigned long alloc_mask;

		/** @as.faulty_mask: Bitmask encoding the faulty slots. */
		unsigned long faulty_mask;

		/** @as.slots: VMs currently bound to the AS slots. */
		struct panthor_as_slot slots[MAX_AS_SLOTS];

		/**
		 * @as.lru_list: List of least recently used VMs.
		 *
		 * We use this list to pick a VM to evict when all slots are
		 * used.
		 *
		 * There should be no more active VMs than there are AS slots,
		 * so this LRU is just here to keep VMs bound until there's
		 * a need to release a slot, thus avoiding unnecessary TLB/cache
		 * flushes.
		 */
		struct list_head lru_list;
	} as;

	/** @vm: VMs management fields */
	struct {
		/** @vm.lock: Lock protecting access to list. */
		struct mutex lock;

		/** @vm.list: List containing all VMs. */
		struct list_head list;

		/** @vm.reset_in_progress: True if a reset is in progress. */
		bool reset_in_progress;

		/** @vm.wq: Workqueue used for the VM_BIND queues. */
		struct workqueue_struct *wq;
	} vm;
};

/**
 * struct panthor_vm_pool - VM pool object
 */
struct panthor_vm_pool {
	/** @xa: Array used for VM handle tracking. */
	struct xarray xa;
};

/**
 * struct panthor_vma - GPU mapping object
 *
 * This is used to track GEM mappings in GPU space.
 */
struct panthor_vma {
	/** @base: Inherits from drm_gpuva. */
	struct drm_gpuva base;

	/** @node: Used to implement deferred release of VMAs. */
	struct list_head node;

	/**
	 * @flags: Combination of drm_panthor_vm_bind_op_flags.
	 *
	 * Only map related flags are accepted.
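	 * In practice that means DRM_PANTHOR_VM_BIND_OP_MAP_READONLY,
	 * DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC and DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED
	 * (see PANTHOR_VM_MAP_FLAGS further down).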
	 */
	u32 flags;
};

/**
 * struct panthor_vm_op_ctx - VM operation context
 *
 * With VM operations potentially taking place in a dma-signaling path, we
 * need to make sure everything that might require resource allocation is
 * pre-allocated upfront. This is what this operation context is for.
 *
 * We also collect resources that have been freed, so we can release them
 * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
 * request.
 */
struct panthor_vm_op_ctx {
	/** @rsvd_page_tables: Pages reserved for the MMU page table update. */
	struct {
		/** @rsvd_page_tables.count: Number of pages reserved. */
		u32 count;

		/** @rsvd_page_tables.ptr: Points to the first unused page in the @pages table. */
		u32 ptr;

		/**
		 * @rsvd_page_tables.pages: Array of pages to be used for an MMU page table update.
		 *
		 * After a VM operation, there might be free pages left in this array.
		 * They should be returned to the pt_cache as part of the op_ctx cleanup.
		 */
		void **pages;
	} rsvd_page_tables;

	/**
	 * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
	 *
	 * Partial unmap requests or map requests overlapping existing mappings will
	 * trigger a remap call, which needs to register up to three panthor_vma objects
	 * (one for the new mapping, and two for the previous and next mappings).
	 */
	struct panthor_vma *preallocated_vmas[3];

	/** @flags: Combination of drm_panthor_vm_bind_op_flags. */
	u32 flags;

	/** @va: Virtual range targeted by the VM operation. */
	struct {
		/** @va.addr: Start address. */
		u64 addr;

		/** @va.range: Range size. */
		u64 range;
	} va;

	/** @map: Fields specific to a map operation. */
	struct {
		/** @map.vm_bo: Buffer object to map. */
		struct drm_gpuvm_bo *vm_bo;

		/** @map.bo_offset: Offset in the buffer object. */
		u64 bo_offset;

		/**
		 * @map.sgt: sg-table pointing to pages backing the GEM object.
		 *
		 * This is gathered at job creation time, such that we don't have
		 * to allocate in ::run_job().
		 */
		struct sg_table *sgt;

		/**
		 * @map.new_vma: The new VMA object that will be inserted to the VA tree.
		 */
		struct panthor_vma *new_vma;
	} map;
};

/**
 * struct panthor_vm - VM object
 *
 * A VM is an object representing a GPU (or MCU) virtual address space.
 * It embeds the MMU page table for this address space, a tree containing
 * all the virtual mappings of GEM objects, and other things needed to manage
 * the VM.
 *
 * Except for the MCU VM, which is managed by the kernel, all other VMs are
 * created by userspace and mostly managed by userspace, using the
 * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
 *
 * A portion of the virtual address space is reserved for kernel objects,
 * like heap chunks, and userspace gets to decide how much of the virtual
 * address space is left to the kernel (half of the virtual address space
 * by default).
 */
struct panthor_vm {
	/**
	 * @base: Inherits from drm_gpuvm.
	 *
	 * We delegate all the VA management to the common drm_gpuvm framework
	 * and only implement hooks to update the MMU page table.
	 */
	struct drm_gpuvm base;

	/**
	 * @sched: Scheduler used for asynchronous VM_BIND requests.
	 *
	 * We use a 1:1 scheduler here.
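	 * That is, each VM gets its own drm_gpu_scheduler instance with a
	 * single entity attached to it, so VM_BIND jobs targeting the same VM
	 * are processed one at a time.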
	 */
	struct drm_gpu_scheduler sched;

	/**
	 * @entity: Scheduling entity representing the VM_BIND queue.
	 *
	 * There's currently one bind queue per VM. It doesn't make sense to
	 * allow more given the VM operations are serialized anyway.
	 */
	struct drm_sched_entity entity;

	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/** @memattr: Value to program to the AS_MEMATTR register. */
	u64 memattr;

	/** @pgtbl_ops: Page table operations. */
	struct io_pgtable_ops *pgtbl_ops;

	/** @root_page_table: Stores the root page table pointer. */
	void *root_page_table;

	/**
	 * @op_lock: Lock used to serialize operations on a VM.
	 *
	 * The serialization of jobs queued to the VM_BIND queue is already
	 * taken care of by drm_sched, but we need to serialize synchronous
	 * and asynchronous VM_BIND requests. This is what this lock is for.
	 */
	struct mutex op_lock;

	/**
	 * @op_ctx: The context attached to the currently executing VM operation.
	 *
	 * NULL when no operation is in progress.
	 */
	struct panthor_vm_op_ctx *op_ctx;

	/**
	 * @mm: Memory management object representing the auto-VA/kernel-VA.
	 *
	 * Used to auto-allocate VA space for kernel-managed objects (tiler
	 * heaps, ...).
	 *
	 * For the MCU VM, this is managing the VA range that's used to map
	 * all shared interfaces.
	 *
	 * For user VMs, the range is specified by userspace, and must not
	 * exceed half of the addressable VA space.
	 */
	struct drm_mm mm;

	/** @mm_lock: Lock protecting the @mm field. */
	struct mutex mm_lock;

	/** @kernel_auto_va: Automatic VA-range for kernel BOs. */
	struct {
		/** @kernel_auto_va.start: Start of the automatic VA-range for kernel BOs. */
		u64 start;

		/** @kernel_auto_va.end: End of the automatic VA-range for kernel BOs. */
		u64 end;
	} kernel_auto_va;

	/** @as: Address space related fields. */
	struct {
		/**
		 * @as.id: ID of the address space this VM is bound to.
		 *
		 * A value of -1 means the VM is inactive/not bound.
		 */
		int id;

		/** @as.active_cnt: Number of active users of this VM. */
		refcount_t active_cnt;

		/**
		 * @as.lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
		 *
		 * Active VMs should not be inserted in the LRU list.
		 */
		struct list_head lru_node;
	} as;

	/**
	 * @heaps: Tiler heap related fields.
	 */
	struct {
		/**
		 * @heaps.pool: The heap pool attached to this VM.
		 *
		 * Will stay NULL until someone creates a heap context on this VM.
		 */
		struct panthor_heap_pool *pool;

		/** @heaps.lock: Lock used to protect access to @pool. */
		struct mutex lock;
	} heaps;

	/** @node: Used to insert the VM in the panthor_mmu::vm::list. */
	struct list_head node;

	/** @for_mcu: True if this is the MCU VM. */
	bool for_mcu;

	/**
	 * @destroyed: True if the VM was destroyed.
	 *
	 * No further bind requests should be queued to a destroyed VM.
	 */
	bool destroyed;

	/**
	 * @unusable: True if the VM has turned unusable because something
	 * bad happened during an asynchronous request.
	 *
	 * We don't try to recover from such failures, because this implies
	 * informing userspace about the specific operation that failed, and
	 * hoping the userspace driver can replay things from there.
	 * This all sounds very complicated for little gain.
	 *
	 * Instead, we should just flag the VM as unusable, and fail any
	 * further request targeting this VM.
	 *
	 * We also provide a way to query a VM state, so userspace can destroy
	 * it and create a new one.
	 *
	 * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
	 * situation, where the logical device needs to be re-created.
	 */
	bool unusable;

	/**
	 * @unhandled_fault: Unhandled fault happened.
	 *
	 * This should be reported to the scheduler, and the queue/group be
	 * flagged as faulty as a result.
	 */
	bool unhandled_fault;

	/** @locked_region: Information about the currently locked region. */
	struct {
		/** @locked_region.start: Start of the locked region. */
		u64 start;

		/** @locked_region.size: Size of the locked region. */
		u64 size;
	} locked_region;
};

/**
 * struct panthor_vm_bind_job - VM bind job
 */
struct panthor_vm_bind_job {
	/** @base: Inherits from drm_sched_job. */
	struct drm_sched_job base;

	/** @refcount: Reference count. */
	struct kref refcount;

	/** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
	struct work_struct cleanup_op_ctx_work;

	/** @vm: VM targeted by the VM operation. */
	struct panthor_vm *vm;

	/** @ctx: Operation context. */
	struct panthor_vm_op_ctx ctx;
};

/*
 * @pt_cache: Cache used to allocate MMU page tables.
 *
 * The pre-allocation pattern forces us to over-allocate to plan for
 * the worst case scenario, and return the pages we didn't use.
 *
 * Having a kmem_cache allows us to speed up allocations.
 */
static struct kmem_cache *pt_cache;

/**
 * alloc_pt() - Custom page table allocator
 * @cookie: Cookie passed at page table allocation time.
 * @size: Size of the page table. This size should be fixed,
 * and determined at creation time based on the granule size.
 * @gfp: GFP flags.
 *
 * We want a custom allocator so we can use a cache for page table
 * allocations and amortize the cost of the over-reservation that's
 * done to allow asynchronous VM operations.
 *
 * Return: non-NULL on success, NULL if the allocation failed for any
 * reason.
 */
static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
{
	struct panthor_vm *vm = cookie;
	void *page;

	/* Allocation of the root page table happens during init. */
	if (unlikely(!vm->root_page_table)) {
		struct page *p;

		drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
		p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
				     gfp | __GFP_ZERO, get_order(size));
		page = p ? page_address(p) : NULL;
		vm->root_page_table = page;
		return page;
	}

	/* We're not supposed to have anything bigger than 4k here, because we picked a
	 * 4k granule size at init time.
	 */
	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
		return NULL;

	/* We must have some op_ctx attached to the VM and it must have at least one
	 * free page.
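	 * The pages are pre-reserved by the panthor_vm_prepare_{map,unmap}_op_ctx()
	 * helpers, which bulk-allocate from pt_cache before the operation is queued.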
458 */ 459 if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) || 460 drm_WARN_ON(&vm->ptdev->base, 461 vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count)) 462 return NULL; 463 464 page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++]; 465 memset(page, 0, SZ_4K); 466 467 /* Page table entries don't use virtual addresses, which trips out 468 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses 469 * are mixed with other fields, and I fear kmemleak won't detect that 470 * either. 471 * 472 * Let's just ignore memory passed to the page-table driver for now. 473 */ 474 kmemleak_ignore(page); 475 return page; 476 } 477 478 /** 479 * free_pt() - Custom page table free function 480 * @cookie: Cookie passed at page table allocation time. 481 * @data: Page table to free. 482 * @size: Size of the page table. This size should be fixed, 483 * and determined at creation time based on the granule size. 484 */ 485 static void free_pt(void *cookie, void *data, size_t size) 486 { 487 struct panthor_vm *vm = cookie; 488 489 if (unlikely(vm->root_page_table == data)) { 490 free_pages((unsigned long)data, get_order(size)); 491 vm->root_page_table = NULL; 492 return; 493 } 494 495 if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K)) 496 return; 497 498 /* Return the page to the pt_cache. */ 499 kmem_cache_free(pt_cache, data); 500 } 501 502 static int wait_ready(struct panthor_device *ptdev, u32 as_nr) 503 { 504 int ret; 505 u32 val; 506 507 /* Wait for the MMU status to indicate there is no active command, in 508 * case one is pending. 509 */ 510 ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val, 511 !(val & AS_STATUS_AS_ACTIVE), 512 10, 100000); 513 514 if (ret) { 515 panthor_device_schedule_reset(ptdev); 516 drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n"); 517 } 518 519 return ret; 520 } 521 522 static int as_send_cmd_and_wait(struct panthor_device *ptdev, u32 as_nr, u32 cmd) 523 { 524 int status; 525 526 /* write AS_COMMAND when MMU is ready to accept another command */ 527 status = wait_ready(ptdev, as_nr); 528 if (!status) { 529 gpu_write(ptdev, AS_COMMAND(as_nr), cmd); 530 status = wait_ready(ptdev, as_nr); 531 } 532 533 return status; 534 } 535 536 static u64 pack_region_range(struct panthor_device *ptdev, u64 *region_start, u64 *size) 537 { 538 u8 region_width; 539 u64 region_end = *region_start + *size; 540 541 if (drm_WARN_ON_ONCE(&ptdev->base, !*size)) 542 return 0; 543 544 /* 545 * The locked region is a naturally aligned power of 2 block encoded as 546 * log2 minus(1). 547 * Calculate the desired start/end and look for the highest bit which 548 * differs. The smallest naturally aligned block must include this bit 549 * change, the desired region starts with this bit (and subsequent bits) 550 * zeroed and ends with the bit (and subsequent bits) set to one. 
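	 *
	 * Worked example (illustrative values, assuming AS_LOCK_REGION_MIN_SIZE
	 * is smaller than 8M): locking start=0x200000 with size=0x300000 gives
	 * end=0x500000, so fls64(0x200000 ^ 0x4fffff) = 23 and region_width = 22.
	 * The locked region becomes [0, 8M) (start is masked down to 0, size
	 * becomes 1 << 23), which covers the requested [2M, 5M) range, and the
	 * encoded value is region_width | start = 22.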
551 */ 552 region_width = max(fls64(*region_start ^ (region_end - 1)), 553 const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1; 554 555 /* 556 * Mask off the low bits of region_start (which would be ignored by 557 * the hardware anyway) 558 */ 559 *region_start &= GENMASK_ULL(63, region_width); 560 *size = 1ull << (region_width + 1); 561 562 return region_width | *region_start; 563 } 564 565 static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr, 566 u64 transtab, u64 transcfg, u64 memattr) 567 { 568 gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab); 569 gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr); 570 gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg); 571 572 return as_send_cmd_and_wait(ptdev, as_nr, AS_COMMAND_UPDATE); 573 } 574 575 static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr, 576 bool recycle_slot) 577 { 578 struct panthor_vm *vm = ptdev->mmu->as.slots[as_nr].vm; 579 int ret; 580 581 lockdep_assert_held(&ptdev->mmu->as.slots_lock); 582 583 /* Flush+invalidate RW caches, invalidate RO ones. */ 584 ret = panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV, 585 CACHE_CLEAN | CACHE_INV, CACHE_INV); 586 if (ret) 587 return ret; 588 589 if (vm && vm->locked_region.size) { 590 /* Unlock the region if there's a lock pending. */ 591 ret = as_send_cmd_and_wait(ptdev, vm->as.id, AS_COMMAND_UNLOCK); 592 if (ret) 593 return ret; 594 } 595 596 /* If the slot is going to be used immediately, don't bother changing 597 * the config. 598 */ 599 if (recycle_slot) 600 return 0; 601 602 gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0); 603 gpu_write64(ptdev, AS_MEMATTR(as_nr), 0); 604 gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED); 605 606 return as_send_cmd_and_wait(ptdev, as_nr, AS_COMMAND_UPDATE); 607 } 608 609 static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value) 610 { 611 /* Bits 16 to 31 mean REQ_COMPLETE. */ 612 return value & GENMASK(15, 0); 613 } 614 615 static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as) 616 { 617 return BIT(as); 618 } 619 620 /** 621 * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults 622 * @vm: VM to check. 623 * 624 * Return: true if the VM has unhandled faults, false otherwise. 625 */ 626 bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm) 627 { 628 return vm->unhandled_fault; 629 } 630 631 /** 632 * panthor_vm_is_unusable() - Check if the VM is still usable 633 * @vm: VM to check. 634 * 635 * Return: true if the VM is unusable, false otherwise. 636 */ 637 bool panthor_vm_is_unusable(struct panthor_vm *vm) 638 { 639 return vm->unusable; 640 } 641 642 static void panthor_vm_release_as_locked(struct panthor_vm *vm) 643 { 644 struct panthor_device *ptdev = vm->ptdev; 645 646 lockdep_assert_held(&ptdev->mmu->as.slots_lock); 647 648 if (drm_WARN_ON(&ptdev->base, vm->as.id < 0)) 649 return; 650 651 ptdev->mmu->as.slots[vm->as.id].vm = NULL; 652 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); 653 refcount_set(&vm->as.active_cnt, 0); 654 list_del_init(&vm->as.lru_node); 655 vm->as.id = -1; 656 } 657 658 /** 659 * panthor_vm_active() - Flag a VM as active 660 * @vm: VM to flag as active. 661 * 662 * Assigns an address space to a VM so it can be used by the GPU/MCU. 663 * 664 * Return: 0 on success, a negative error code otherwise. 
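 *
 * A successful call is expected to be balanced by a panthor_vm_idle() call
 * once the GPU/MCU is done using the VM.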
665 */ 666 int panthor_vm_active(struct panthor_vm *vm) 667 { 668 struct panthor_device *ptdev = vm->ptdev; 669 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features); 670 struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg; 671 int ret = 0, as, cookie; 672 u64 transtab, transcfg; 673 674 if (!drm_dev_enter(&ptdev->base, &cookie)) 675 return -ENODEV; 676 677 if (refcount_inc_not_zero(&vm->as.active_cnt)) 678 goto out_dev_exit; 679 680 /* Make sure we don't race with lock/unlock_region() calls 681 * happening around VM bind operations. 682 */ 683 mutex_lock(&vm->op_lock); 684 mutex_lock(&ptdev->mmu->as.slots_lock); 685 686 if (refcount_inc_not_zero(&vm->as.active_cnt)) 687 goto out_unlock; 688 689 as = vm->as.id; 690 if (as >= 0) { 691 /* Unhandled pagefault on this AS, the MMU was disabled. We need to 692 * re-enable the MMU after clearing+unmasking the AS interrupts. 693 */ 694 if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) 695 goto out_enable_as; 696 697 goto out_make_active; 698 } 699 700 /* Check for a free AS */ 701 if (vm->for_mcu) { 702 drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0)); 703 as = 0; 704 } else { 705 as = ffz(ptdev->mmu->as.alloc_mask | BIT(0)); 706 } 707 708 if (!(BIT(as) & ptdev->gpu_info.as_present)) { 709 struct panthor_vm *lru_vm; 710 711 lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list, 712 struct panthor_vm, 713 as.lru_node); 714 if (drm_WARN_ON(&ptdev->base, !lru_vm)) { 715 ret = -EBUSY; 716 goto out_unlock; 717 } 718 719 drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt)); 720 as = lru_vm->as.id; 721 722 ret = panthor_mmu_as_disable(ptdev, as, true); 723 if (ret) 724 goto out_unlock; 725 726 panthor_vm_release_as_locked(lru_vm); 727 } 728 729 /* Assign the free or reclaimed AS to the FD */ 730 vm->as.id = as; 731 set_bit(as, &ptdev->mmu->as.alloc_mask); 732 ptdev->mmu->as.slots[as].vm = vm; 733 734 out_enable_as: 735 transtab = cfg->arm_lpae_s1_cfg.ttbr; 736 transcfg = AS_TRANSCFG_PTW_MEMATTR_WB | 737 AS_TRANSCFG_PTW_RA | 738 AS_TRANSCFG_ADRMODE_AARCH64_4K | 739 AS_TRANSCFG_INA_BITS(55 - va_bits); 740 if (ptdev->coherent) 741 transcfg |= AS_TRANSCFG_PTW_SH_OS; 742 743 /* If the VM is re-activated, we clear the fault. */ 744 vm->unhandled_fault = false; 745 746 /* Unhandled pagefault on this AS, clear the fault and re-enable interrupts 747 * before enabling the AS. 748 */ 749 if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) { 750 gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as)); 751 ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as); 752 ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as); 753 gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask); 754 } 755 756 /* The VM update is guarded by ::op_lock, which we take at the beginning 757 * of this function, so we don't expect any locked region here. 758 */ 759 drm_WARN_ON(&vm->ptdev->base, vm->locked_region.size > 0); 760 ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr); 761 762 out_make_active: 763 if (!ret) { 764 refcount_set(&vm->as.active_cnt, 1); 765 list_del_init(&vm->as.lru_node); 766 } 767 768 out_unlock: 769 mutex_unlock(&ptdev->mmu->as.slots_lock); 770 mutex_unlock(&vm->op_lock); 771 772 out_dev_exit: 773 drm_dev_exit(cookie); 774 return ret; 775 } 776 777 /** 778 * panthor_vm_idle() - Flag a VM idle 779 * @vm: VM to flag as idle. 
 *
 * When we know the GPU is done with the VM (no more jobs to process),
 * we can relinquish the AS slot attached to this VM, if any.
 *
 * We don't release the slot immediately, but instead place the VM in
 * the LRU list, so it can be evicted if another VM needs an AS slot.
 * This way, VMs stay attached to the AS they were given until we run
 * out of free slots, limiting the number of MMU operations (TLB flush
 * and other AS updates).
 */
void panthor_vm_idle(struct panthor_vm *vm)
{
	struct panthor_device *ptdev = vm->ptdev;

	if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
		return;

	if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
		list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);

	refcount_set(&vm->as.active_cnt, 0);
	mutex_unlock(&ptdev->mmu->as.slots_lock);
}

u32 panthor_vm_page_size(struct panthor_vm *vm)
{
	const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
	u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;

	return 1u << pg_shift;
}

static void panthor_vm_stop(struct panthor_vm *vm)
{
	drm_sched_stop(&vm->sched, NULL);
}

static void panthor_vm_start(struct panthor_vm *vm)
{
	drm_sched_start(&vm->sched, 0);
}

/**
 * panthor_vm_as() - Get the AS slot attached to a VM
 * @vm: VM to get the AS slot of.
 *
 * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
 */
int panthor_vm_as(struct panthor_vm *vm)
{
	return vm->as.id;
}

static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
	/*
	 * io-pgtable only operates on multiple pages within a single table
	 * entry, so we need to split at boundaries of the table size, i.e.
	 * the next block size up. The distance from address A to the next
	 * boundary of block size B is logically B - A % B, but in unsigned
	 * two's complement where B is a power of two we get the equivalence
	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
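	 *
	 * For instance (illustrative numbers), with addr=0x1ff000 and
	 * size=0x402000: the first call returns SZ_4K with *count = 1 (one page
	 * up to the 2M boundary), the next call (addr=0x200000, size=0x401000)
	 * returns SZ_2M with *count = 2, and a final call handles the remaining
	 * 4K page.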
	 */
	size_t blk_offset = -addr % SZ_2M;

	if (blk_offset || size < SZ_2M) {
		*count = min_not_zero(blk_offset, size) / SZ_4K;
		return SZ_4K;
	}
	blk_offset = -addr % SZ_1G ?: SZ_1G;
	*count = min(blk_offset, size) / SZ_2M;
	return SZ_2M;
}

static void panthor_vm_declare_unusable(struct panthor_vm *vm)
{
	struct panthor_device *ptdev = vm->ptdev;
	int cookie;

	if (vm->unusable)
		return;

	vm->unusable = true;
	mutex_lock(&ptdev->mmu->as.slots_lock);
	if (vm->as.id >= 0 && drm_dev_enter(&ptdev->base, &cookie)) {
		panthor_mmu_as_disable(ptdev, vm->as.id, false);
		drm_dev_exit(cookie);
	}
	mutex_unlock(&ptdev->mmu->as.slots_lock);
}

static void panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
	struct panthor_device *ptdev = vm->ptdev;
	struct io_pgtable_ops *ops = vm->pgtbl_ops;
	u64 start_iova = iova;
	u64 offset = 0;

	if (!size)
		return;

	drm_WARN_ON(&ptdev->base,
		    (iova < vm->locked_region.start) ||
		    (iova + size > vm->locked_region.start + vm->locked_region.size));

	while (offset < size) {
		size_t unmapped_sz = 0, pgcount;
		size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);

		unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);
		if (drm_WARN_ON_ONCE(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
			/* Gracefully handle sparsely unmapped regions to avoid leaving
			 * page table pages behind when the drm_gpuvm and VM page table
			 * are out-of-sync. This is not supposed to happen, hence the
			 * above WARN_ON().
			 */
			while (!ops->iova_to_phys(ops, iova + unmapped_sz) &&
			       unmapped_sz < pgsize * pgcount)
				unmapped_sz += SZ_4K;

			/* We're past the point where we can try to fix things,
			 * so flag the VM unusable to make sure it's not going
			 * to be used anymore.
			 */
			panthor_vm_declare_unusable(vm);

			/* If we don't make progress, we're screwed. That also means
			 * something else prevents us from unmapping the region, but
			 * there's not much we can do here: time for debugging.
909 */ 910 if (drm_WARN_ON_ONCE(&ptdev->base, !unmapped_sz)) 911 return; 912 } 913 914 drm_dbg(&ptdev->base, 915 "unmap: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pgcnt=%zu, pgsz=%zu", 916 vm->as.id, start_iova, size, iova + offset, 917 unmapped_sz / pgsize, pgsize); 918 919 offset += unmapped_sz; 920 } 921 } 922 923 static int 924 panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot, 925 struct sg_table *sgt, u64 offset, u64 size) 926 { 927 struct panthor_device *ptdev = vm->ptdev; 928 unsigned int count; 929 struct scatterlist *sgl; 930 struct io_pgtable_ops *ops = vm->pgtbl_ops; 931 u64 start_iova = iova; 932 u64 start_size = size; 933 int ret; 934 935 if (!size) 936 return 0; 937 938 drm_WARN_ON(&ptdev->base, 939 (iova < vm->locked_region.start) || 940 (iova + size > vm->locked_region.start + vm->locked_region.size)); 941 942 for_each_sgtable_dma_sg(sgt, sgl, count) { 943 dma_addr_t paddr = sg_dma_address(sgl); 944 size_t len = sg_dma_len(sgl); 945 946 if (len <= offset) { 947 offset -= len; 948 continue; 949 } 950 951 paddr += offset; 952 len -= offset; 953 len = min_t(size_t, len, size); 954 size -= len; 955 956 while (len) { 957 size_t pgcount, mapped = 0; 958 size_t pgsize = get_pgsize(iova | paddr, len, &pgcount); 959 960 ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot, 961 GFP_KERNEL, &mapped); 962 963 drm_dbg(&ptdev->base, 964 "map: as=%d, iova=0x%llx, sz=%llu, va=0x%llx, pa=%pad, pgcnt=%zu, pgsz=%zu", 965 vm->as.id, start_iova, start_size, iova, &paddr, 966 mapped / pgsize, pgsize); 967 968 iova += mapped; 969 paddr += mapped; 970 len -= mapped; 971 972 /* If nothing was mapped, consider it an ENOMEM. */ 973 if (!ret && !mapped) 974 ret = -ENOMEM; 975 976 /* If something fails, we stop there, and flag the VM unusable. */ 977 if (drm_WARN_ON_ONCE(&ptdev->base, ret)) { 978 /* Unmap what we've already mapped to avoid leaving page 979 * table pages behind. 980 */ 981 panthor_vm_unmap_pages(vm, start_iova, iova - start_iova); 982 panthor_vm_declare_unusable(vm); 983 return ret; 984 } 985 } 986 987 if (!size) 988 break; 989 990 offset = 0; 991 } 992 993 return 0; 994 } 995 996 static int flags_to_prot(u32 flags) 997 { 998 int prot = 0; 999 1000 if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC) 1001 prot |= IOMMU_NOEXEC; 1002 1003 if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)) 1004 prot |= IOMMU_CACHE; 1005 1006 if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY) 1007 prot |= IOMMU_READ; 1008 else 1009 prot |= IOMMU_READ | IOMMU_WRITE; 1010 1011 return prot; 1012 } 1013 1014 /** 1015 * panthor_vm_alloc_va() - Allocate a region in the auto-va space 1016 * @vm: VM to allocate a region on. 1017 * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user 1018 * wants the VA to be automatically allocated from the auto-VA range. 1019 * @size: size of the VA range. 1020 * @va_node: drm_mm_node to initialize. Must be zero-initialized. 1021 * 1022 * Some GPU objects, like heap chunks, are fully managed by the kernel and 1023 * need to be mapped to the userspace VM, in the region reserved for kernel 1024 * objects. 1025 * 1026 * This function takes care of allocating a region in the kernel auto-VA space. 1027 * 1028 * Return: 0 on success, an error code otherwise. 
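 *
 * When @va is PANTHOR_VM_KERNEL_AUTO_VA, the allocation is 2M-aligned if
 * @size is at least 2M, and 4K-aligned otherwise (see the
 * drm_mm_insert_node_in_range() call below).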
1029 */ 1030 int 1031 panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size, 1032 struct drm_mm_node *va_node) 1033 { 1034 ssize_t vm_pgsz = panthor_vm_page_size(vm); 1035 int ret; 1036 1037 if (!size || !IS_ALIGNED(size, vm_pgsz)) 1038 return -EINVAL; 1039 1040 if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz)) 1041 return -EINVAL; 1042 1043 mutex_lock(&vm->mm_lock); 1044 if (va != PANTHOR_VM_KERNEL_AUTO_VA) { 1045 va_node->start = va; 1046 va_node->size = size; 1047 ret = drm_mm_reserve_node(&vm->mm, va_node); 1048 } else { 1049 ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size, 1050 size >= SZ_2M ? SZ_2M : SZ_4K, 1051 0, vm->kernel_auto_va.start, 1052 vm->kernel_auto_va.end, 1053 DRM_MM_INSERT_BEST); 1054 } 1055 mutex_unlock(&vm->mm_lock); 1056 1057 return ret; 1058 } 1059 1060 /** 1061 * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va() 1062 * @vm: VM to free the region on. 1063 * @va_node: Memory node representing the region to free. 1064 */ 1065 void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node) 1066 { 1067 mutex_lock(&vm->mm_lock); 1068 drm_mm_remove_node(va_node); 1069 mutex_unlock(&vm->mm_lock); 1070 } 1071 1072 static void panthor_vm_bo_free(struct drm_gpuvm_bo *vm_bo) 1073 { 1074 struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj); 1075 1076 if (!drm_gem_is_imported(&bo->base.base)) 1077 drm_gem_shmem_unpin(&bo->base); 1078 kfree(vm_bo); 1079 } 1080 1081 static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx, 1082 struct panthor_vm *vm) 1083 { 1084 u32 remaining_pt_count = op_ctx->rsvd_page_tables.count - 1085 op_ctx->rsvd_page_tables.ptr; 1086 1087 if (remaining_pt_count) { 1088 kmem_cache_free_bulk(pt_cache, remaining_pt_count, 1089 op_ctx->rsvd_page_tables.pages + 1090 op_ctx->rsvd_page_tables.ptr); 1091 } 1092 1093 kfree(op_ctx->rsvd_page_tables.pages); 1094 1095 if (op_ctx->map.vm_bo) 1096 drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo); 1097 1098 for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) 1099 kfree(op_ctx->preallocated_vmas[i]); 1100 1101 drm_gpuvm_bo_deferred_cleanup(&vm->base); 1102 } 1103 1104 static void 1105 panthor_vm_op_ctx_return_vma(struct panthor_vm_op_ctx *op_ctx, 1106 struct panthor_vma *vma) 1107 { 1108 for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) { 1109 if (!op_ctx->preallocated_vmas[i]) { 1110 op_ctx->preallocated_vmas[i] = vma; 1111 return; 1112 } 1113 } 1114 1115 WARN_ON_ONCE(1); 1116 } 1117 1118 static struct panthor_vma * 1119 panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx) 1120 { 1121 for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) { 1122 struct panthor_vma *vma = op_ctx->preallocated_vmas[i]; 1123 1124 if (vma) { 1125 op_ctx->preallocated_vmas[i] = NULL; 1126 return vma; 1127 } 1128 } 1129 1130 return NULL; 1131 } 1132 1133 static int 1134 panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx) 1135 { 1136 u32 vma_count; 1137 1138 switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) { 1139 case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: 1140 /* One VMA for the new mapping, and two more VMAs for the remap case 1141 * which might contain both a prev and next VA. 1142 */ 1143 vma_count = 3; 1144 break; 1145 1146 case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: 1147 /* Two VMAs can be needed for an unmap, as an unmap can happen 1148 * in the middle of a drm_gpuva, requiring a remap with both 1149 * prev & next VA. 
Or an unmap can span more than one drm_gpuva 1150 * where the first and last ones are covered partially, requring 1151 * a remap for the first with a prev VA and remap for the last 1152 * with a next VA. 1153 */ 1154 vma_count = 2; 1155 break; 1156 1157 default: 1158 return 0; 1159 } 1160 1161 for (u32 i = 0; i < vma_count; i++) { 1162 struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL); 1163 1164 if (!vma) 1165 return -ENOMEM; 1166 1167 op_ctx->preallocated_vmas[i] = vma; 1168 } 1169 1170 return 0; 1171 } 1172 1173 #define PANTHOR_VM_BIND_OP_MAP_FLAGS \ 1174 (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \ 1175 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \ 1176 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \ 1177 DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) 1178 1179 static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, 1180 struct panthor_vm *vm, 1181 struct panthor_gem_object *bo, 1182 u64 offset, 1183 u64 size, u64 va, 1184 u32 flags) 1185 { 1186 struct drm_gpuvm_bo *preallocated_vm_bo; 1187 struct sg_table *sgt = NULL; 1188 u64 pt_count; 1189 int ret; 1190 1191 if (!bo) 1192 return -EINVAL; 1193 1194 if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) || 1195 (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP) 1196 return -EINVAL; 1197 1198 /* Make sure the VA and size are in-bounds. */ 1199 if (size > bo->base.base.size || offset > bo->base.base.size - size) 1200 return -EINVAL; 1201 1202 /* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */ 1203 if (bo->exclusive_vm_root_gem && 1204 bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm)) 1205 return -EINVAL; 1206 1207 memset(op_ctx, 0, sizeof(*op_ctx)); 1208 op_ctx->flags = flags; 1209 op_ctx->va.range = size; 1210 op_ctx->va.addr = va; 1211 1212 ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx); 1213 if (ret) 1214 goto err_cleanup; 1215 1216 if (!drm_gem_is_imported(&bo->base.base)) { 1217 /* Pre-reserve the BO pages, so the map operation doesn't have to 1218 * allocate. This pin is dropped in panthor_vm_bo_free(), so 1219 * once we have successfully called drm_gpuvm_bo_create(), 1220 * GPUVM will take care of dropping the pin for us. 1221 */ 1222 ret = drm_gem_shmem_pin(&bo->base); 1223 if (ret) 1224 goto err_cleanup; 1225 } 1226 1227 sgt = drm_gem_shmem_get_pages_sgt(&bo->base); 1228 if (IS_ERR(sgt)) { 1229 if (!drm_gem_is_imported(&bo->base.base)) 1230 drm_gem_shmem_unpin(&bo->base); 1231 1232 ret = PTR_ERR(sgt); 1233 goto err_cleanup; 1234 } 1235 1236 op_ctx->map.sgt = sgt; 1237 1238 preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base); 1239 if (!preallocated_vm_bo) { 1240 if (!drm_gem_is_imported(&bo->base.base)) 1241 drm_gem_shmem_unpin(&bo->base); 1242 1243 ret = -ENOMEM; 1244 goto err_cleanup; 1245 } 1246 1247 /* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our 1248 * pre-allocated BO if the <BO,VM> association exists. Given we 1249 * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will 1250 * be called immediately, and we have to hold the VM resv lock when 1251 * calling this function. 1252 */ 1253 dma_resv_lock(panthor_vm_resv(vm), NULL); 1254 mutex_lock(&bo->base.base.gpuva.lock); 1255 op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo); 1256 mutex_unlock(&bo->base.base.gpuva.lock); 1257 dma_resv_unlock(panthor_vm_resv(vm)); 1258 1259 op_ctx->map.bo_offset = offset; 1260 1261 /* L1, L2 and L3 page tables. 
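	 * For instance (illustrative numbers), a 4M mapping at VA 0x80000000
	 * touches one 512G-aligned region, one 1G-aligned region and two
	 * 2M-aligned regions, so pt_count = 1 + 1 + 2 = 4 pages get reserved.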
1262 * We could optimize L3 allocation by iterating over the sgt and merging 1263 * 2M contiguous blocks, but it's simpler to over-provision and return 1264 * the pages if they're not used. 1265 */ 1266 pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) + 1267 ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) + 1268 ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21); 1269 1270 op_ctx->rsvd_page_tables.pages = kcalloc(pt_count, 1271 sizeof(*op_ctx->rsvd_page_tables.pages), 1272 GFP_KERNEL); 1273 if (!op_ctx->rsvd_page_tables.pages) { 1274 ret = -ENOMEM; 1275 goto err_cleanup; 1276 } 1277 1278 ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count, 1279 op_ctx->rsvd_page_tables.pages); 1280 op_ctx->rsvd_page_tables.count = ret; 1281 if (ret != pt_count) { 1282 ret = -ENOMEM; 1283 goto err_cleanup; 1284 } 1285 1286 /* Insert BO into the extobj list last, when we know nothing can fail. */ 1287 dma_resv_lock(panthor_vm_resv(vm), NULL); 1288 drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo); 1289 dma_resv_unlock(panthor_vm_resv(vm)); 1290 1291 return 0; 1292 1293 err_cleanup: 1294 panthor_vm_cleanup_op_ctx(op_ctx, vm); 1295 return ret; 1296 } 1297 1298 static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx, 1299 struct panthor_vm *vm, 1300 u64 va, u64 size) 1301 { 1302 u32 pt_count = 0; 1303 int ret; 1304 1305 memset(op_ctx, 0, sizeof(*op_ctx)); 1306 op_ctx->va.range = size; 1307 op_ctx->va.addr = va; 1308 op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP; 1309 1310 /* Pre-allocate L3 page tables to account for the split-2M-block 1311 * situation on unmap. 1312 */ 1313 if (va != ALIGN(va, SZ_2M)) 1314 pt_count++; 1315 1316 if (va + size != ALIGN(va + size, SZ_2M) && 1317 ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M)) 1318 pt_count++; 1319 1320 ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx); 1321 if (ret) 1322 goto err_cleanup; 1323 1324 if (pt_count) { 1325 op_ctx->rsvd_page_tables.pages = kcalloc(pt_count, 1326 sizeof(*op_ctx->rsvd_page_tables.pages), 1327 GFP_KERNEL); 1328 if (!op_ctx->rsvd_page_tables.pages) { 1329 ret = -ENOMEM; 1330 goto err_cleanup; 1331 } 1332 1333 ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count, 1334 op_ctx->rsvd_page_tables.pages); 1335 if (ret != pt_count) { 1336 ret = -ENOMEM; 1337 goto err_cleanup; 1338 } 1339 op_ctx->rsvd_page_tables.count = pt_count; 1340 } 1341 1342 return 0; 1343 1344 err_cleanup: 1345 panthor_vm_cleanup_op_ctx(op_ctx, vm); 1346 return ret; 1347 } 1348 1349 static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx, 1350 struct panthor_vm *vm) 1351 { 1352 memset(op_ctx, 0, sizeof(*op_ctx)); 1353 op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY; 1354 } 1355 1356 /** 1357 * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address 1358 * @vm: VM to look into. 1359 * @va: Virtual address to search for. 1360 * @bo_offset: Offset of the GEM object mapped at this virtual address. 1361 * Only valid on success. 1362 * 1363 * The object returned by this function might no longer be mapped when the 1364 * function returns. It's the caller responsibility to ensure there's no 1365 * concurrent map/unmap operations making the returned value invalid, or 1366 * make sure it doesn't matter if the object is no longer mapped. 1367 * 1368 * Return: A valid pointer on success, an ERR_PTR() otherwise. 
1369 */ 1370 struct panthor_gem_object * 1371 panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset) 1372 { 1373 struct panthor_gem_object *bo = ERR_PTR(-ENOENT); 1374 struct drm_gpuva *gpuva; 1375 struct panthor_vma *vma; 1376 1377 /* Take the VM lock to prevent concurrent map/unmap operations. */ 1378 mutex_lock(&vm->op_lock); 1379 gpuva = drm_gpuva_find_first(&vm->base, va, 1); 1380 vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL; 1381 if (vma && vma->base.gem.obj) { 1382 drm_gem_object_get(vma->base.gem.obj); 1383 bo = to_panthor_bo(vma->base.gem.obj); 1384 *bo_offset = vma->base.gem.offset + (va - vma->base.va.addr); 1385 } 1386 mutex_unlock(&vm->op_lock); 1387 1388 return bo; 1389 } 1390 1391 #define PANTHOR_VM_MIN_KERNEL_VA_SIZE SZ_256M 1392 1393 static u64 1394 panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args, 1395 u64 full_va_range) 1396 { 1397 u64 user_va_range; 1398 1399 /* Make sure we have a minimum amount of VA space for kernel objects. */ 1400 if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE) 1401 return 0; 1402 1403 if (args->user_va_range) { 1404 /* Use the user provided value if != 0. */ 1405 user_va_range = args->user_va_range; 1406 } else if (TASK_SIZE_OF(current) < full_va_range) { 1407 /* If the task VM size is smaller than the GPU VA range, pick this 1408 * as our default user VA range, so userspace can CPU/GPU map buffers 1409 * at the same address. 1410 */ 1411 user_va_range = TASK_SIZE_OF(current); 1412 } else { 1413 /* If the GPU VA range is smaller than the task VM size, we 1414 * just have to live with the fact we won't be able to map 1415 * all buffers at the same GPU/CPU address. 1416 * 1417 * If the GPU VA range is bigger than 4G (more than 32-bit of 1418 * VA), we split the range in two, and assign half of it to 1419 * the user and the other half to the kernel, if it's not, we 1420 * keep the kernel VA space as small as possible. 1421 */ 1422 user_va_range = full_va_range > SZ_4G ? 1423 full_va_range / 2 : 1424 full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE; 1425 } 1426 1427 if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range) 1428 user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE; 1429 1430 return user_va_range; 1431 } 1432 1433 #define PANTHOR_VM_CREATE_FLAGS 0 1434 1435 static int 1436 panthor_vm_create_check_args(const struct panthor_device *ptdev, 1437 const struct drm_panthor_vm_create *args, 1438 u64 *kernel_va_start, u64 *kernel_va_range) 1439 { 1440 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features); 1441 u64 full_va_range = 1ull << va_bits; 1442 u64 user_va_range; 1443 1444 if (args->flags & ~PANTHOR_VM_CREATE_FLAGS) 1445 return -EINVAL; 1446 1447 user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range); 1448 if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range)) 1449 return -EINVAL; 1450 1451 /* Pick a kernel VA range that's a power of two, to have a clear split. */ 1452 *kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range); 1453 *kernel_va_start = full_va_range - *kernel_va_range; 1454 return 0; 1455 } 1456 1457 /* 1458 * Only 32 VMs per open file. If that becomes a limiting factor, we can 1459 * increase this number. 1460 */ 1461 #define PANTHOR_MAX_VMS_PER_FILE 32 1462 1463 /** 1464 * panthor_vm_pool_create_vm() - Create a VM 1465 * @ptdev: The panthor device 1466 * @pool: The VM to create this VM on. 1467 * @args: VM creation args. 
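 *
 * On success, @args->user_va_range is also updated to reflect the effective
 * size of the user VA range (which is where the kernel VA range starts).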
1468 * 1469 * Return: a positive VM ID on success, a negative error code otherwise. 1470 */ 1471 int panthor_vm_pool_create_vm(struct panthor_device *ptdev, 1472 struct panthor_vm_pool *pool, 1473 struct drm_panthor_vm_create *args) 1474 { 1475 u64 kernel_va_start, kernel_va_range; 1476 struct panthor_vm *vm; 1477 int ret; 1478 u32 id; 1479 1480 ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range); 1481 if (ret) 1482 return ret; 1483 1484 vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range, 1485 kernel_va_start, kernel_va_range); 1486 if (IS_ERR(vm)) 1487 return PTR_ERR(vm); 1488 1489 ret = xa_alloc(&pool->xa, &id, vm, 1490 XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL); 1491 1492 if (ret) { 1493 panthor_vm_put(vm); 1494 return ret; 1495 } 1496 1497 args->user_va_range = kernel_va_start; 1498 return id; 1499 } 1500 1501 static void panthor_vm_destroy(struct panthor_vm *vm) 1502 { 1503 if (!vm) 1504 return; 1505 1506 vm->destroyed = true; 1507 1508 /* Tell scheduler to stop all GPU work related to this VM */ 1509 if (refcount_read(&vm->as.active_cnt) > 0) 1510 panthor_sched_prepare_for_vm_destruction(vm->ptdev); 1511 1512 mutex_lock(&vm->heaps.lock); 1513 panthor_heap_pool_destroy(vm->heaps.pool); 1514 vm->heaps.pool = NULL; 1515 mutex_unlock(&vm->heaps.lock); 1516 1517 drm_WARN_ON(&vm->ptdev->base, 1518 panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range)); 1519 panthor_vm_put(vm); 1520 } 1521 1522 /** 1523 * panthor_vm_pool_destroy_vm() - Destroy a VM. 1524 * @pool: VM pool. 1525 * @handle: VM handle. 1526 * 1527 * This function doesn't free the VM object or its resources, it just kills 1528 * all mappings, and makes sure nothing can be mapped after that point. 1529 * 1530 * If there was any active jobs at the time this function is called, these 1531 * jobs should experience page faults and be killed as a result. 1532 * 1533 * The VM resources are freed when the last reference on the VM object is 1534 * dropped. 1535 * 1536 * Return: %0 for success, negative errno value for failure 1537 */ 1538 int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle) 1539 { 1540 struct panthor_vm *vm; 1541 1542 vm = xa_erase(&pool->xa, handle); 1543 1544 panthor_vm_destroy(vm); 1545 1546 return vm ? 0 : -EINVAL; 1547 } 1548 1549 /** 1550 * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle 1551 * @pool: VM pool to check. 1552 * @handle: Handle of the VM to retrieve. 1553 * 1554 * Return: A valid pointer if the VM exists, NULL otherwise. 1555 */ 1556 struct panthor_vm * 1557 panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle) 1558 { 1559 struct panthor_vm *vm; 1560 1561 xa_lock(&pool->xa); 1562 vm = panthor_vm_get(xa_load(&pool->xa, handle)); 1563 xa_unlock(&pool->xa); 1564 1565 return vm; 1566 } 1567 1568 /** 1569 * panthor_vm_pool_destroy() - Destroy a VM pool. 1570 * @pfile: File. 1571 * 1572 * Destroy all VMs in the pool, and release the pool resources. 1573 * 1574 * Note that VMs can outlive the pool they were created from if other 1575 * objects hold a reference to there VMs. 1576 */ 1577 void panthor_vm_pool_destroy(struct panthor_file *pfile) 1578 { 1579 struct panthor_vm *vm; 1580 unsigned long i; 1581 1582 if (!pfile->vms) 1583 return; 1584 1585 xa_for_each(&pfile->vms->xa, i, vm) 1586 panthor_vm_destroy(vm); 1587 1588 xa_destroy(&pfile->vms->xa); 1589 kfree(pfile->vms); 1590 } 1591 1592 /** 1593 * panthor_vm_pool_create() - Create a VM pool 1594 * @pfile: File. 
1595 * 1596 * Return: 0 on success, a negative error code otherwise. 1597 */ 1598 int panthor_vm_pool_create(struct panthor_file *pfile) 1599 { 1600 pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL); 1601 if (!pfile->vms) 1602 return -ENOMEM; 1603 1604 xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1); 1605 return 0; 1606 } 1607 1608 /* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */ 1609 static void mmu_tlb_flush_all(void *cookie) 1610 { 1611 } 1612 1613 static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) 1614 { 1615 } 1616 1617 static const struct iommu_flush_ops mmu_tlb_ops = { 1618 .tlb_flush_all = mmu_tlb_flush_all, 1619 .tlb_flush_walk = mmu_tlb_flush_walk, 1620 }; 1621 1622 static const char *access_type_name(struct panthor_device *ptdev, 1623 u32 fault_status) 1624 { 1625 switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) { 1626 case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC: 1627 return "ATOMIC"; 1628 case AS_FAULTSTATUS_ACCESS_TYPE_READ: 1629 return "READ"; 1630 case AS_FAULTSTATUS_ACCESS_TYPE_WRITE: 1631 return "WRITE"; 1632 case AS_FAULTSTATUS_ACCESS_TYPE_EX: 1633 return "EXECUTE"; 1634 default: 1635 drm_WARN_ON(&ptdev->base, 1); 1636 return NULL; 1637 } 1638 } 1639 1640 static int panthor_vm_lock_region(struct panthor_vm *vm, u64 start, u64 size) 1641 { 1642 struct panthor_device *ptdev = vm->ptdev; 1643 int ret = 0; 1644 1645 /* sm_step_remap() can call panthor_vm_lock_region() to account for 1646 * the wider unmap needed when doing a partial huge page unamp. We 1647 * need to ignore the lock if it's already part of the locked region. 1648 */ 1649 if (start >= vm->locked_region.start && 1650 start + size <= vm->locked_region.start + vm->locked_region.size) 1651 return 0; 1652 1653 mutex_lock(&ptdev->mmu->as.slots_lock); 1654 if (vm->as.id >= 0 && size) { 1655 /* Lock the region that needs to be updated */ 1656 gpu_write64(ptdev, AS_LOCKADDR(vm->as.id), 1657 pack_region_range(ptdev, &start, &size)); 1658 1659 /* If the lock succeeded, update the locked_region info. */ 1660 ret = as_send_cmd_and_wait(ptdev, vm->as.id, AS_COMMAND_LOCK); 1661 } 1662 1663 if (!ret) { 1664 vm->locked_region.start = start; 1665 vm->locked_region.size = size; 1666 } 1667 mutex_unlock(&ptdev->mmu->as.slots_lock); 1668 1669 return ret; 1670 } 1671 1672 static void panthor_vm_unlock_region(struct panthor_vm *vm) 1673 { 1674 struct panthor_device *ptdev = vm->ptdev; 1675 1676 mutex_lock(&ptdev->mmu->as.slots_lock); 1677 if (vm->as.id >= 0) { 1678 int ret; 1679 1680 /* flush+invalidate RW caches and invalidate RO ones. 1681 * TODO: See if we can use FLUSH_PA_RANGE when the physical 1682 * range is narrow enough and the HW supports it. 1683 */ 1684 ret = panthor_gpu_flush_caches(ptdev, CACHE_CLEAN | CACHE_INV, 1685 CACHE_CLEAN | CACHE_INV, 1686 CACHE_INV); 1687 1688 /* Unlock the region if the flush is effective. */ 1689 if (!ret) 1690 ret = as_send_cmd_and_wait(ptdev, vm->as.id, AS_COMMAND_UNLOCK); 1691 1692 /* If we fail to flush or unlock the region, schedule a GPU reset 1693 * to unblock the situation. 
1694 */ 1695 if (ret) 1696 panthor_device_schedule_reset(ptdev); 1697 } 1698 vm->locked_region.start = 0; 1699 vm->locked_region.size = 0; 1700 mutex_unlock(&ptdev->mmu->as.slots_lock); 1701 } 1702 1703 static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) 1704 { 1705 bool has_unhandled_faults = false; 1706 1707 status = panthor_mmu_fault_mask(ptdev, status); 1708 while (status) { 1709 u32 as = ffs(status | (status >> 16)) - 1; 1710 u32 mask = panthor_mmu_as_fault_mask(ptdev, as); 1711 u32 new_int_mask; 1712 u64 addr; 1713 u32 fault_status; 1714 u32 exception_type; 1715 u32 access_type; 1716 u32 source_id; 1717 1718 fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as)); 1719 addr = gpu_read64(ptdev, AS_FAULTADDRESS(as)); 1720 1721 /* decode the fault status */ 1722 exception_type = fault_status & 0xFF; 1723 access_type = (fault_status >> 8) & 0x3; 1724 source_id = (fault_status >> 16); 1725 1726 mutex_lock(&ptdev->mmu->as.slots_lock); 1727 1728 ptdev->mmu->as.faulty_mask |= mask; 1729 new_int_mask = 1730 panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask); 1731 1732 /* terminal fault, print info about the fault */ 1733 drm_err(&ptdev->base, 1734 "Unhandled Page fault in AS%d at VA 0x%016llX\n" 1735 "raw fault status: 0x%X\n" 1736 "decoded fault status: %s\n" 1737 "exception type 0x%X: %s\n" 1738 "access type 0x%X: %s\n" 1739 "source id 0x%X\n", 1740 as, addr, 1741 fault_status, 1742 (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"), 1743 exception_type, panthor_exception_name(ptdev, exception_type), 1744 access_type, access_type_name(ptdev, fault_status), 1745 source_id); 1746 1747 /* We don't handle VM faults at the moment, so let's just clear the 1748 * interrupt and let the writer/reader crash. 1749 * Note that COMPLETED irqs are never cleared, but this is fine 1750 * because they are always masked. 1751 */ 1752 gpu_write(ptdev, MMU_INT_CLEAR, mask); 1753 1754 /* Ignore MMU interrupts on this AS until it's been 1755 * re-enabled. 1756 */ 1757 ptdev->mmu->irq.mask = new_int_mask; 1758 1759 if (ptdev->mmu->as.slots[as].vm) 1760 ptdev->mmu->as.slots[as].vm->unhandled_fault = true; 1761 1762 /* Disable the MMU to kill jobs on this AS. */ 1763 panthor_mmu_as_disable(ptdev, as, false); 1764 mutex_unlock(&ptdev->mmu->as.slots_lock); 1765 1766 status &= ~mask; 1767 has_unhandled_faults = true; 1768 } 1769 1770 if (has_unhandled_faults) 1771 panthor_sched_report_mmu_fault(ptdev); 1772 } 1773 PANTHOR_IRQ_HANDLER(mmu, MMU, panthor_mmu_irq_handler); 1774 1775 /** 1776 * panthor_mmu_suspend() - Suspend the MMU logic 1777 * @ptdev: Device. 1778 * 1779 * All we do here is de-assign the AS slots on all active VMs, so things 1780 * get flushed to the main memory, and no further access to these VMs are 1781 * possible. 1782 * 1783 * We also suspend the MMU IRQ. 1784 */ 1785 void panthor_mmu_suspend(struct panthor_device *ptdev) 1786 { 1787 mutex_lock(&ptdev->mmu->as.slots_lock); 1788 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) { 1789 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; 1790 1791 if (vm) { 1792 drm_WARN_ON(&ptdev->base, 1793 panthor_mmu_as_disable(ptdev, i, false)); 1794 panthor_vm_release_as_locked(vm); 1795 } 1796 } 1797 mutex_unlock(&ptdev->mmu->as.slots_lock); 1798 1799 panthor_mmu_irq_suspend(&ptdev->mmu->irq); 1800 } 1801 1802 /** 1803 * panthor_mmu_resume() - Resume the MMU logic 1804 * @ptdev: Device. 1805 * 1806 * Resume the IRQ. 1807 * 1808 * We don't re-enable previously active VMs. 
We assume other parts of the 1809 * driver will call panthor_vm_active() on the VMs they intend to use. 1810 */ 1811 void panthor_mmu_resume(struct panthor_device *ptdev) 1812 { 1813 mutex_lock(&ptdev->mmu->as.slots_lock); 1814 ptdev->mmu->as.alloc_mask = 0; 1815 ptdev->mmu->as.faulty_mask = 0; 1816 mutex_unlock(&ptdev->mmu->as.slots_lock); 1817 1818 panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0)); 1819 } 1820 1821 /** 1822 * panthor_mmu_pre_reset() - Prepare for a reset 1823 * @ptdev: Device. 1824 * 1825 * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we 1826 * don't get asked to do a VM operation while the GPU is down. 1827 * 1828 * We don't cleanly shutdown the AS slots here, because the reset might 1829 * come from an AS_ACTIVE_BIT stuck situation. 1830 */ 1831 void panthor_mmu_pre_reset(struct panthor_device *ptdev) 1832 { 1833 struct panthor_vm *vm; 1834 1835 panthor_mmu_irq_suspend(&ptdev->mmu->irq); 1836 1837 mutex_lock(&ptdev->mmu->vm.lock); 1838 ptdev->mmu->vm.reset_in_progress = true; 1839 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) 1840 panthor_vm_stop(vm); 1841 mutex_unlock(&ptdev->mmu->vm.lock); 1842 } 1843 1844 /** 1845 * panthor_mmu_post_reset() - Restore things after a reset 1846 * @ptdev: Device. 1847 * 1848 * Put the MMU logic back in action after a reset. That implies resuming the 1849 * IRQ and re-enabling the VM_BIND queues. 1850 */ 1851 void panthor_mmu_post_reset(struct panthor_device *ptdev) 1852 { 1853 struct panthor_vm *vm; 1854 1855 mutex_lock(&ptdev->mmu->as.slots_lock); 1856 1857 /* Now that the reset is effective, we can assume that none of the 1858 * AS slots are setup, and clear the faulty flags too. 1859 */ 1860 ptdev->mmu->as.alloc_mask = 0; 1861 ptdev->mmu->as.faulty_mask = 0; 1862 1863 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) { 1864 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; 1865 1866 if (vm) 1867 panthor_vm_release_as_locked(vm); 1868 } 1869 1870 mutex_unlock(&ptdev->mmu->as.slots_lock); 1871 1872 panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0)); 1873 1874 /* Restart the VM_BIND queues. */ 1875 mutex_lock(&ptdev->mmu->vm.lock); 1876 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { 1877 panthor_vm_start(vm); 1878 } 1879 ptdev->mmu->vm.reset_in_progress = false; 1880 mutex_unlock(&ptdev->mmu->vm.lock); 1881 } 1882 1883 static void panthor_vm_free(struct drm_gpuvm *gpuvm) 1884 { 1885 struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base); 1886 struct panthor_device *ptdev = vm->ptdev; 1887 1888 mutex_lock(&vm->heaps.lock); 1889 if (drm_WARN_ON(&ptdev->base, vm->heaps.pool)) 1890 panthor_heap_pool_destroy(vm->heaps.pool); 1891 mutex_unlock(&vm->heaps.lock); 1892 mutex_destroy(&vm->heaps.lock); 1893 1894 mutex_lock(&ptdev->mmu->vm.lock); 1895 list_del(&vm->node); 1896 /* Restore the scheduler state so we can call drm_sched_entity_destroy() 1897 * and drm_sched_fini(). If get there, that means we have no job left 1898 * and no new jobs can be queued, so we can start the scheduler without 1899 * risking interfering with the reset. 
1900 */ 1901 if (ptdev->mmu->vm.reset_in_progress) 1902 panthor_vm_start(vm); 1903 mutex_unlock(&ptdev->mmu->vm.lock); 1904 1905 drm_sched_entity_destroy(&vm->entity); 1906 drm_sched_fini(&vm->sched); 1907 1908 mutex_lock(&vm->op_lock); 1909 mutex_lock(&ptdev->mmu->as.slots_lock); 1910 if (vm->as.id >= 0) { 1911 int cookie; 1912 1913 if (drm_dev_enter(&ptdev->base, &cookie)) { 1914 panthor_mmu_as_disable(ptdev, vm->as.id, false); 1915 drm_dev_exit(cookie); 1916 } 1917 1918 ptdev->mmu->as.slots[vm->as.id].vm = NULL; 1919 clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask); 1920 list_del(&vm->as.lru_node); 1921 } 1922 mutex_unlock(&ptdev->mmu->as.slots_lock); 1923 mutex_unlock(&vm->op_lock); 1924 1925 free_io_pgtable_ops(vm->pgtbl_ops); 1926 1927 drm_mm_takedown(&vm->mm); 1928 kfree(vm); 1929 } 1930 1931 /** 1932 * panthor_vm_put() - Release a reference on a VM 1933 * @vm: VM to release the reference on. Can be NULL. 1934 */ 1935 void panthor_vm_put(struct panthor_vm *vm) 1936 { 1937 drm_gpuvm_put(vm ? &vm->base : NULL); 1938 } 1939 1940 /** 1941 * panthor_vm_get() - Get a VM reference 1942 * @vm: VM to get the reference on. Can be NULL. 1943 * 1944 * Return: @vm value. 1945 */ 1946 struct panthor_vm *panthor_vm_get(struct panthor_vm *vm) 1947 { 1948 if (vm) 1949 drm_gpuvm_get(&vm->base); 1950 1951 return vm; 1952 } 1953 1954 /** 1955 * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM 1956 * @vm: VM to query the heap pool on. 1957 * @create: True if the heap pool should be created when it doesn't exist. 1958 * 1959 * Heap pools are per-VM. This function allows one to retrieve the heap pool 1960 * attached to a VM. 1961 * 1962 * If no heap pool exists yet, and @create is true, we create one. 1963 * 1964 * The returned panthor_heap_pool should be released with panthor_heap_pool_put(). 1965 * 1966 * Return: A valid pointer on success, an ERR_PTR() otherwise. 1967 */ 1968 struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create) 1969 { 1970 struct panthor_heap_pool *pool; 1971 1972 mutex_lock(&vm->heaps.lock); 1973 if (!vm->heaps.pool && create) { 1974 if (vm->destroyed) 1975 pool = ERR_PTR(-EINVAL); 1976 else 1977 pool = panthor_heap_pool_create(vm->ptdev, vm); 1978 1979 if (!IS_ERR(pool)) 1980 vm->heaps.pool = panthor_heap_pool_get(pool); 1981 } else { 1982 pool = panthor_heap_pool_get(vm->heaps.pool); 1983 if (!pool) 1984 pool = ERR_PTR(-ENOENT); 1985 } 1986 mutex_unlock(&vm->heaps.lock); 1987 1988 return pool; 1989 } 1990 1991 /** 1992 * panthor_vm_heaps_sizes() - Calculate size of all heap chunks across all 1993 * heaps over all the heap pools in a VM 1994 * @pfile: File. 1995 * @stats: Memory stats to be updated. 1996 * 1997 * Calculate all heap chunk sizes in all heap pools bound to a VM. If the VM 1998 * is active, record the size as active as well. 
1999 */ 2000 void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats) 2001 { 2002 struct panthor_vm *vm; 2003 unsigned long i; 2004 2005 if (!pfile->vms) 2006 return; 2007 2008 xa_lock(&pfile->vms->xa); 2009 xa_for_each(&pfile->vms->xa, i, vm) { 2010 size_t size = panthor_heap_pool_size(vm->heaps.pool); 2011 stats->resident += size; 2012 if (vm->as.id >= 0) 2013 stats->active += size; 2014 } 2015 xa_unlock(&pfile->vms->xa); 2016 } 2017 2018 static u64 mair_to_memattr(u64 mair, bool coherent) 2019 { 2020 u64 memattr = 0; 2021 u32 i; 2022 2023 for (i = 0; i < 8; i++) { 2024 u8 in_attr = mair >> (8 * i), out_attr; 2025 u8 outer = in_attr >> 4, inner = in_attr & 0xf; 2026 2027 /* For caching to be enabled, inner and outer caching policy 2028 * have to be both write-back, if one of them is write-through 2029 * or non-cacheable, we just choose non-cacheable. Device 2030 * memory is also translated to non-cacheable. 2031 */ 2032 if (!(outer & 3) || !(outer & 4) || !(inner & 4)) { 2033 out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC | 2034 AS_MEMATTR_AARCH64_SH_MIDGARD_INNER | 2035 AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false); 2036 } else { 2037 out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB | 2038 AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2); 2039 /* Use SH_MIDGARD_INNER mode when device isn't coherent, 2040 * so SH_IS, which is used when IOMMU_CACHE is set, maps 2041 * to Mali's internal-shareable mode. As per the Mali 2042 * Spec, inner and outer-shareable modes aren't allowed 2043 * for WB memory when coherency is disabled. 2044 * Use SH_CPU_INNER mode when coherency is enabled, so 2045 * that SH_IS actually maps to the standard definition of 2046 * inner-shareable. 2047 */ 2048 if (!coherent) 2049 out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER; 2050 else 2051 out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER; 2052 } 2053 2054 memattr |= (u64)out_attr << (8 * i); 2055 } 2056 2057 return memattr; 2058 } 2059 2060 static void panthor_vma_link(struct panthor_vm *vm, 2061 struct panthor_vma *vma, 2062 struct drm_gpuvm_bo *vm_bo) 2063 { 2064 struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj); 2065 2066 mutex_lock(&bo->base.base.gpuva.lock); 2067 drm_gpuva_link(&vma->base, vm_bo); 2068 mutex_unlock(&bo->base.base.gpuva.lock); 2069 } 2070 2071 static void panthor_vma_unlink(struct panthor_vma *vma) 2072 { 2073 drm_gpuva_unlink_defer(&vma->base); 2074 kfree(vma); 2075 } 2076 2077 static void panthor_vma_init(struct panthor_vma *vma, u32 flags) 2078 { 2079 INIT_LIST_HEAD(&vma->node); 2080 vma->flags = flags; 2081 } 2082 2083 #define PANTHOR_VM_MAP_FLAGS \ 2084 (DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \ 2085 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \ 2086 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED) 2087 2088 static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv) 2089 { 2090 struct panthor_vm *vm = priv; 2091 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx; 2092 struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx); 2093 int ret; 2094 2095 if (!vma) 2096 return -EINVAL; 2097 2098 panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS); 2099 2100 ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags), 2101 op_ctx->map.sgt, op->map.gem.offset, 2102 op->map.va.range); 2103 if (ret) { 2104 panthor_vm_op_ctx_return_vma(op_ctx, vma); 2105 return ret; 2106 } 2107 2108 drm_gpuva_map(&vm->base, &vma->base, &op->map); 2109 panthor_vma_link(vm, vma, op_ctx->map.vm_bo); 2110 2111 drm_gpuvm_bo_put_deferred(op_ctx->map.vm_bo); 2112 
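/* The vm_bo reference held by the op_ctx was dropped (deferred) right above; clear the pointer so the op_ctx cleanup doesn't touch it again. */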
op_ctx->map.vm_bo = NULL; 2113 2114 return 0; 2115 } 2116 2117 static bool 2118 iova_mapped_as_huge_page(struct drm_gpuva_op_map *op, u64 addr) 2119 { 2120 const struct page *pg; 2121 pgoff_t bo_offset; 2122 2123 bo_offset = addr - op->va.addr + op->gem.offset; 2124 pg = to_panthor_bo(op->gem.obj)->base.pages[bo_offset >> PAGE_SHIFT]; 2125 2126 return folio_size(page_folio(pg)) >= SZ_2M; 2127 } 2128 2129 static void 2130 unmap_hugepage_align(const struct drm_gpuva_op_remap *op, 2131 u64 *unmap_start, u64 *unmap_range) 2132 { 2133 u64 aligned_unmap_start, aligned_unmap_end, unmap_end; 2134 2135 unmap_end = *unmap_start + *unmap_range; 2136 aligned_unmap_start = ALIGN_DOWN(*unmap_start, SZ_2M); 2137 aligned_unmap_end = ALIGN(unmap_end, SZ_2M); 2138 2139 /* If we're dealing with a huge page, make sure the unmap region is 2140 * aligned on the start of the page. 2141 */ 2142 if (op->prev && aligned_unmap_start < *unmap_start && 2143 op->prev->va.addr <= aligned_unmap_start && 2144 iova_mapped_as_huge_page(op->prev, *unmap_start)) { 2145 *unmap_range += *unmap_start - aligned_unmap_start; 2146 *unmap_start = aligned_unmap_start; 2147 } 2148 2149 /* If we're dealing with a huge page, make sure the unmap region is 2150 * aligned on the end of the page. 2151 */ 2152 if (op->next && aligned_unmap_end > unmap_end && 2153 op->next->va.addr + op->next->va.range >= aligned_unmap_end && 2154 iova_mapped_as_huge_page(op->next, unmap_end - 1)) { 2155 *unmap_range += aligned_unmap_end - unmap_end; 2156 } 2157 } 2158 2159 static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op, 2160 void *priv) 2161 { 2162 struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base); 2163 struct panthor_vm *vm = priv; 2164 struct panthor_vm_op_ctx *op_ctx = vm->op_ctx; 2165 struct panthor_vma *prev_vma = NULL, *next_vma = NULL; 2166 u64 unmap_start, unmap_range; 2167 int ret; 2168 2169 drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range); 2170 2171 /* 2172 * ARM IOMMU page table management code disallows partial unmaps of huge pages, 2173 * so when a partial unmap is requested, we must first unmap the entire huge 2174 * page and then remap the difference between the huge page and the requested 2175 * unmap region. Calculating the right start address and range for the expanded 2176 * unmap operation is the responsibility of the following function. 2177 */ 2178 unmap_hugepage_align(&op->remap, &unmap_start, &unmap_range); 2179 2180 /* If the range changed, we might have to lock a wider region to guarantee 2181 * atomicity. panthor_vm_lock_region() bails out early if the new region 2182 * is already part of the locked region, so no need to do this check here.
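 * For example, a 4k partial unmap in the middle of a 2M huge page is expanded by unmap_hugepage_align() to cover the whole 2M range, so the region locked here can be wider than the one locked in panthor_vm_exec_op().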
2183 */ 2184 panthor_vm_lock_region(vm, unmap_start, unmap_range); 2185 panthor_vm_unmap_pages(vm, unmap_start, unmap_range); 2186 2187 if (op->remap.prev) { 2188 struct panthor_gem_object *bo = to_panthor_bo(op->remap.prev->gem.obj); 2189 u64 offset = op->remap.prev->gem.offset + unmap_start - op->remap.prev->va.addr; 2190 u64 size = op->remap.prev->va.addr + op->remap.prev->va.range - unmap_start; 2191 2192 ret = panthor_vm_map_pages(vm, unmap_start, flags_to_prot(unmap_vma->flags), 2193 bo->base.sgt, offset, size); 2194 if (ret) 2195 return ret; 2196 2197 prev_vma = panthor_vm_op_ctx_get_vma(op_ctx); 2198 panthor_vma_init(prev_vma, unmap_vma->flags); 2199 } 2200 2201 if (op->remap.next) { 2202 struct panthor_gem_object *bo = to_panthor_bo(op->remap.next->gem.obj); 2203 u64 addr = op->remap.next->va.addr; 2204 u64 size = unmap_start + unmap_range - op->remap.next->va.addr; 2205 2206 ret = panthor_vm_map_pages(vm, addr, flags_to_prot(unmap_vma->flags), 2207 bo->base.sgt, op->remap.next->gem.offset, size); 2208 if (ret) 2209 return ret; 2210 2211 next_vma = panthor_vm_op_ctx_get_vma(op_ctx); 2212 panthor_vma_init(next_vma, unmap_vma->flags); 2213 } 2214 2215 drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL, 2216 next_vma ? &next_vma->base : NULL, 2217 &op->remap); 2218 2219 if (prev_vma) { 2220 /* panthor_vma_link() transfers the vm_bo ownership to 2221 * the VMA object. Since the vm_bo we're passing is still 2222 * owned by the old mapping which will be released when this 2223 * mapping is destroyed, we need to grab a ref here. 2224 */ 2225 panthor_vma_link(vm, prev_vma, op->remap.unmap->va->vm_bo); 2226 } 2227 2228 if (next_vma) { 2229 panthor_vma_link(vm, next_vma, op->remap.unmap->va->vm_bo); 2230 } 2231 2232 panthor_vma_unlink(unmap_vma); 2233 return 0; 2234 } 2235 2236 static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op, 2237 void *priv) 2238 { 2239 struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base); 2240 struct panthor_vm *vm = priv; 2241 2242 panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr, 2243 unmap_vma->base.va.range); 2244 drm_gpuva_unmap(&op->unmap); 2245 panthor_vma_unlink(unmap_vma); 2246 return 0; 2247 } 2248 2249 static const struct drm_gpuvm_ops panthor_gpuvm_ops = { 2250 .vm_free = panthor_vm_free, 2251 .vm_bo_free = panthor_vm_bo_free, 2252 .sm_step_map = panthor_gpuva_sm_step_map, 2253 .sm_step_remap = panthor_gpuva_sm_step_remap, 2254 .sm_step_unmap = panthor_gpuva_sm_step_unmap, 2255 }; 2256 2257 /** 2258 * panthor_vm_resv() - Get the dma_resv object attached to a VM. 2259 * @vm: VM to get the dma_resv of. 2260 * 2261 * Return: A dma_resv object. 
*/ 2263 struct dma_resv *panthor_vm_resv(struct panthor_vm *vm) 2264 { 2265 return drm_gpuvm_resv(&vm->base); 2266 } 2267 2268 struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm) 2269 { 2270 if (!vm) 2271 return NULL; 2272 2273 return vm->base.r_obj; 2274 } 2275 2276 static int 2277 panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op, 2278 bool flag_vm_unusable_on_failure) 2279 { 2280 u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK; 2281 int ret; 2282 2283 if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY) 2284 return 0; 2285 2286 mutex_lock(&vm->op_lock); 2287 vm->op_ctx = op; 2288 2289 ret = panthor_vm_lock_region(vm, op->va.addr, op->va.range); 2290 if (ret) 2291 goto out; 2292 2293 switch (op_type) { 2294 case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: { 2295 const struct drm_gpuvm_map_req map_req = { 2296 .map.va.addr = op->va.addr, 2297 .map.va.range = op->va.range, 2298 .map.gem.obj = op->map.vm_bo->obj, 2299 .map.gem.offset = op->map.bo_offset, 2300 }; 2301 2302 if (vm->unusable) { 2303 ret = -EINVAL; 2304 break; 2305 } 2306 2307 ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req); 2308 break; 2309 } 2310 2311 case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: 2312 ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range); 2313 break; 2314 2315 default: 2316 ret = -EINVAL; 2317 break; 2318 } 2319 2320 panthor_vm_unlock_region(vm); 2321 2322 out: 2323 if (ret && flag_vm_unusable_on_failure) 2324 panthor_vm_declare_unusable(vm); 2325 2326 vm->op_ctx = NULL; 2327 mutex_unlock(&vm->op_lock); 2328 2329 return ret; 2330 } 2331 2332 static struct dma_fence * 2333 panthor_vm_bind_run_job(struct drm_sched_job *sched_job) 2334 { 2335 struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base); 2336 bool cookie; 2337 int ret; 2338 2339 /* Not only do we report an error whose result is propagated to the 2340 * drm_sched finished fence, but we also flag the VM as unusable, because 2341 * a failure in the async VM_BIND results in an inconsistent state. The VM needs 2342 * to be destroyed and recreated. 2343 */ 2344 cookie = dma_fence_begin_signalling(); 2345 ret = panthor_vm_exec_op(job->vm, &job->ctx, true); 2346 dma_fence_end_signalling(cookie); 2347 2348 return ret ? ERR_PTR(ret) : NULL; 2349 } 2350 2351 static void panthor_vm_bind_job_release(struct kref *kref) 2352 { 2353 struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount); 2354 2355 if (job->base.s_fence) 2356 drm_sched_job_cleanup(&job->base); 2357 2358 panthor_vm_cleanup_op_ctx(&job->ctx, job->vm); 2359 panthor_vm_put(job->vm); 2360 kfree(job); 2361 } 2362 2363 /** 2364 * panthor_vm_bind_job_put() - Release a VM_BIND job reference 2365 * @sched_job: Job to release the reference on. 2366 */ 2367 void panthor_vm_bind_job_put(struct drm_sched_job *sched_job) 2368 { 2369 struct panthor_vm_bind_job *job = 2370 container_of(sched_job, struct panthor_vm_bind_job, base); 2371 2372 if (sched_job) 2373 kref_put(&job->refcount, panthor_vm_bind_job_release); 2374 } 2375 2376 static void 2377 panthor_vm_bind_free_job(struct drm_sched_job *sched_job) 2378 { 2379 struct panthor_vm_bind_job *job = 2380 container_of(sched_job, struct panthor_vm_bind_job, base); 2381 2382 drm_sched_job_cleanup(sched_job); 2383 2384 /* Do the heavy cleanups asynchronously, so we're out of the 2385 * dma-signaling path and can acquire dma-resv locks safely.
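 * The queued work just drops the job reference, which in turn releases the op_ctx and the VM reference from the workqueue context.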
2386 */ 2387 queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work); 2388 } 2389 2390 static enum drm_gpu_sched_stat 2391 panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job) 2392 { 2393 WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!"); 2394 return DRM_GPU_SCHED_STAT_RESET; 2395 } 2396 2397 static const struct drm_sched_backend_ops panthor_vm_bind_ops = { 2398 .run_job = panthor_vm_bind_run_job, 2399 .free_job = panthor_vm_bind_free_job, 2400 .timedout_job = panthor_vm_bind_timedout_job, 2401 }; 2402 2403 /** 2404 * panthor_vm_create() - Create a VM 2405 * @ptdev: Device. 2406 * @for_mcu: True if this is the FW MCU VM. 2407 * @kernel_va_start: Start of the range reserved for kernel BO mapping. 2408 * @kernel_va_size: Size of the range reserved for kernel BO mapping. 2409 * @auto_kernel_va_start: Start of the auto-VA kernel range. 2410 * @auto_kernel_va_size: Size of the auto-VA kernel range. 2411 * 2412 * Return: A valid pointer on success, an ERR_PTR() otherwise. 2413 */ 2414 struct panthor_vm * 2415 panthor_vm_create(struct panthor_device *ptdev, bool for_mcu, 2416 u64 kernel_va_start, u64 kernel_va_size, 2417 u64 auto_kernel_va_start, u64 auto_kernel_va_size) 2418 { 2419 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features); 2420 u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features); 2421 u64 full_va_range = 1ull << va_bits; 2422 struct drm_gem_object *dummy_gem; 2423 struct drm_gpu_scheduler *sched; 2424 const struct drm_sched_init_args sched_args = { 2425 .ops = &panthor_vm_bind_ops, 2426 .submit_wq = ptdev->mmu->vm.wq, 2427 .num_rqs = 1, 2428 .credit_limit = 1, 2429 /* Bind operations are synchronous for now, no timeout needed. */ 2430 .timeout = MAX_SCHEDULE_TIMEOUT, 2431 .name = "panthor-vm-bind", 2432 .dev = ptdev->base.dev, 2433 }; 2434 struct io_pgtable_cfg pgtbl_cfg; 2435 u64 mair, min_va, va_range; 2436 struct panthor_vm *vm; 2437 int ret; 2438 2439 vm = kzalloc(sizeof(*vm), GFP_KERNEL); 2440 if (!vm) 2441 return ERR_PTR(-ENOMEM); 2442 2443 /* We allocate a dummy GEM for the VM. 
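 * Its reservation object serves as the VM's common dma_resv: it's the resv object passed to drm_gpuvm_init() below.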
*/ 2444 dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base); 2445 if (!dummy_gem) { 2446 ret = -ENOMEM; 2447 goto err_free_vm; 2448 } 2449 2450 mutex_init(&vm->heaps.lock); 2451 vm->for_mcu = for_mcu; 2452 vm->ptdev = ptdev; 2453 mutex_init(&vm->op_lock); 2454 2455 if (for_mcu) { 2456 /* CSF MCU is a cortex M7, and can only address 4G */ 2457 min_va = 0; 2458 va_range = SZ_4G; 2459 } else { 2460 min_va = 0; 2461 va_range = full_va_range; 2462 } 2463 2464 mutex_init(&vm->mm_lock); 2465 drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size); 2466 vm->kernel_auto_va.start = auto_kernel_va_start; 2467 vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1; 2468 2469 INIT_LIST_HEAD(&vm->node); 2470 INIT_LIST_HEAD(&vm->as.lru_node); 2471 vm->as.id = -1; 2472 refcount_set(&vm->as.active_cnt, 0); 2473 2474 pgtbl_cfg = (struct io_pgtable_cfg) { 2475 .pgsize_bitmap = SZ_4K | SZ_2M, 2476 .ias = va_bits, 2477 .oas = pa_bits, 2478 .coherent_walk = ptdev->coherent, 2479 .tlb = &mmu_tlb_ops, 2480 .iommu_dev = ptdev->base.dev, 2481 .alloc = alloc_pt, 2482 .free = free_pt, 2483 }; 2484 2485 vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm); 2486 if (!vm->pgtbl_ops) { 2487 ret = -EINVAL; 2488 goto err_mm_takedown; 2489 } 2490 2491 ret = drm_sched_init(&vm->sched, &sched_args); 2492 if (ret) 2493 goto err_free_io_pgtable; 2494 2495 sched = &vm->sched; 2496 ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL); 2497 if (ret) 2498 goto err_sched_fini; 2499 2500 mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair; 2501 vm->memattr = mair_to_memattr(mair, ptdev->coherent); 2502 2503 mutex_lock(&ptdev->mmu->vm.lock); 2504 list_add_tail(&vm->node, &ptdev->mmu->vm.list); 2505 2506 /* If a reset is in progress, stop the scheduler. */ 2507 if (ptdev->mmu->vm.reset_in_progress) 2508 panthor_vm_stop(vm); 2509 mutex_unlock(&ptdev->mmu->vm.lock); 2510 2511 /* We intentionally leave the reserved range to zero, because we want kernel VMAs 2512 * to be handled the same way user VMAs are. 2513 */ 2514 drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM", 2515 DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE, 2516 &ptdev->base, dummy_gem, min_va, va_range, 0, 0, 2517 &panthor_gpuvm_ops); 2518 drm_gem_object_put(dummy_gem); 2519 return vm; 2520 2521 err_sched_fini: 2522 drm_sched_fini(&vm->sched); 2523 2524 err_free_io_pgtable: 2525 free_io_pgtable_ops(vm->pgtbl_ops); 2526 2527 err_mm_takedown: 2528 drm_mm_takedown(&vm->mm); 2529 drm_gem_object_put(dummy_gem); 2530 2531 err_free_vm: 2532 kfree(vm); 2533 return ERR_PTR(ret); 2534 } 2535 2536 static int 2537 panthor_vm_bind_prepare_op_ctx(struct drm_file *file, 2538 struct panthor_vm *vm, 2539 const struct drm_panthor_vm_bind_op *op, 2540 struct panthor_vm_op_ctx *op_ctx) 2541 { 2542 ssize_t vm_pgsz = panthor_vm_page_size(vm); 2543 struct drm_gem_object *gem; 2544 int ret; 2545 2546 /* Aligned on page size. */ 2547 if (!IS_ALIGNED(op->va | op->size | op->bo_offset, vm_pgsz)) 2548 return -EINVAL; 2549 2550 switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) { 2551 case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: 2552 gem = drm_gem_object_lookup(file, op->bo_handle); 2553 ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm, 2554 gem ? 
to_panthor_bo(gem) : NULL, 2555 op->bo_offset, 2556 op->size, 2557 op->va, 2558 op->flags); 2559 drm_gem_object_put(gem); 2560 return ret; 2561 2562 case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP: 2563 if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) 2564 return -EINVAL; 2565 2566 if (op->bo_handle || op->bo_offset) 2567 return -EINVAL; 2568 2569 return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size); 2570 2571 case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY: 2572 if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) 2573 return -EINVAL; 2574 2575 if (op->bo_handle || op->bo_offset) 2576 return -EINVAL; 2577 2578 if (op->va || op->size) 2579 return -EINVAL; 2580 2581 if (!op->syncs.count) 2582 return -EINVAL; 2583 2584 panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm); 2585 return 0; 2586 2587 default: 2588 return -EINVAL; 2589 } 2590 } 2591 2592 static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work) 2593 { 2594 struct panthor_vm_bind_job *job = 2595 container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work); 2596 2597 panthor_vm_bind_job_put(&job->base); 2598 } 2599 2600 /** 2601 * panthor_vm_bind_job_create() - Create a VM_BIND job 2602 * @file: File. 2603 * @vm: VM targeted by the VM_BIND job. 2604 * @op: VM operation data. 2605 * 2606 * Return: A valid pointer on success, an ERR_PTR() otherwise. 2607 */ 2608 struct drm_sched_job * 2609 panthor_vm_bind_job_create(struct drm_file *file, 2610 struct panthor_vm *vm, 2611 const struct drm_panthor_vm_bind_op *op) 2612 { 2613 struct panthor_vm_bind_job *job; 2614 int ret; 2615 2616 if (!vm) 2617 return ERR_PTR(-EINVAL); 2618 2619 if (vm->destroyed || vm->unusable) 2620 return ERR_PTR(-EINVAL); 2621 2622 job = kzalloc(sizeof(*job), GFP_KERNEL); 2623 if (!job) 2624 return ERR_PTR(-ENOMEM); 2625 2626 ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx); 2627 if (ret) { 2628 kfree(job); 2629 return ERR_PTR(ret); 2630 } 2631 2632 INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work); 2633 kref_init(&job->refcount); 2634 job->vm = panthor_vm_get(vm); 2635 2636 ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id); 2637 if (ret) 2638 goto err_put_job; 2639 2640 return &job->base; 2641 2642 err_put_job: 2643 panthor_vm_bind_job_put(&job->base); 2644 return ERR_PTR(ret); 2645 } 2646 2647 /** 2648 * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs 2649 * @exec: The locking/preparation context. 2650 * @sched_job: The job to prepare resvs on. 2651 * 2652 * Locks and prepares the VM resv. 2653 * 2654 * If this is a map operation, locks and prepares the GEM resv. 2655 * 2656 * Return: 0 on success, a negative error code otherwise. 2657 */ 2658 int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec, 2659 struct drm_sched_job *sched_job) 2660 { 2661 struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base); 2662 int ret; 2663 2664 /* Acquire the VM lock and reserve a slot for this VM bind job. */ 2665 ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1); 2666 if (ret) 2667 return ret; 2668 2669 if (job->ctx.map.vm_bo) { 2670 /* Lock/prepare the GEM being mapped. */ 2671 ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1); 2672 if (ret) 2673 return ret; 2674 } 2675 2676 return 0; 2677 } 2678 2679 /** 2680 * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job 2681 * @exec: drm_exec context. 2682 * @sched_job: Job to update the resvs on.
2683 */ 2684 void panthor_vm_bind_job_update_resvs(struct drm_exec *exec, 2685 struct drm_sched_job *sched_job) 2686 { 2687 struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base); 2688 2689 /* Explicit sync => we just register our job finished fence as bookkeep. */ 2690 drm_gpuvm_resv_add_fence(&job->vm->base, exec, 2691 &sched_job->s_fence->finished, 2692 DMA_RESV_USAGE_BOOKKEEP, 2693 DMA_RESV_USAGE_BOOKKEEP); 2694 } 2695 2696 void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec, 2697 struct dma_fence *fence, 2698 enum dma_resv_usage private_usage, 2699 enum dma_resv_usage extobj_usage) 2700 { 2701 drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage); 2702 } 2703 2704 /** 2705 * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously. 2706 * @file: File. 2707 * @vm: VM targeted by the VM operation. 2708 * @op: Data describing the VM operation. 2709 * 2710 * Return: 0 on success, a negative error code otherwise. 2711 */ 2712 int panthor_vm_bind_exec_sync_op(struct drm_file *file, 2713 struct panthor_vm *vm, 2714 struct drm_panthor_vm_bind_op *op) 2715 { 2716 struct panthor_vm_op_ctx op_ctx; 2717 int ret; 2718 2719 /* No sync objects allowed on synchronous operations. */ 2720 if (op->syncs.count) 2721 return -EINVAL; 2722 2723 if (!op->size) 2724 return 0; 2725 2726 ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx); 2727 if (ret) 2728 return ret; 2729 2730 ret = panthor_vm_exec_op(vm, &op_ctx, false); 2731 panthor_vm_cleanup_op_ctx(&op_ctx, vm); 2732 2733 return ret; 2734 } 2735 2736 /** 2737 * panthor_vm_map_bo_range() - Map a GEM object range to a VM 2738 * @vm: VM to map the GEM to. 2739 * @bo: GEM object to map. 2740 * @offset: Offset in the GEM object. 2741 * @size: Size to map. 2742 * @va: Virtual address to map the object to. 2743 * @flags: Combination of drm_panthor_vm_bind_op_flags flags. 2744 * Only map-related flags are valid. 2745 * 2746 * Internal use only. For userspace requests, use 2747 * panthor_vm_bind_exec_sync_op() instead. 2748 * 2749 * Return: 0 on success, a negative error code otherwise. 2750 */ 2751 int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo, 2752 u64 offset, u64 size, u64 va, u32 flags) 2753 { 2754 struct panthor_vm_op_ctx op_ctx; 2755 int ret; 2756 2757 ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags); 2758 if (ret) 2759 return ret; 2760 2761 ret = panthor_vm_exec_op(vm, &op_ctx, false); 2762 panthor_vm_cleanup_op_ctx(&op_ctx, vm); 2763 2764 return ret; 2765 } 2766 2767 /** 2768 * panthor_vm_unmap_range() - Unmap a portion of the VA space 2769 * @vm: VM to unmap the region from. 2770 * @va: Virtual address to unmap. Must be 4k aligned. 2771 * @size: Size of the region to unmap. Must be 4k aligned. 2772 * 2773 * Internal use only. For userspace requests, use 2774 * panthor_vm_bind_exec_sync_op() instead. 2775 * 2776 * Return: 0 on success, a negative error code otherwise. 2777 */ 2778 int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size) 2779 { 2780 struct panthor_vm_op_ctx op_ctx; 2781 int ret; 2782 2783 ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size); 2784 if (ret) 2785 return ret; 2786 2787 ret = panthor_vm_exec_op(vm, &op_ctx, false); 2788 panthor_vm_cleanup_op_ctx(&op_ctx, vm); 2789 2790 return ret; 2791 } 2792 2793 /** 2794 * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs. 2795 * @exec: Locking/preparation context. 
2796 * @vm: VM targeted by the GPU job. 2797 * @slot_count: Number of slots to reserve. 2798 * 2799 * GPU jobs assume all BOs bound to the VM at the time the job is submitted 2800 * are available when the job is executed. In order to guarantee that, we 2801 * need to reserve a slot on all BOs mapped to a VM and update this slot with 2802 * the job fence after its submission. 2803 * 2804 * Return: 0 on success, a negative error code otherwise. 2805 */ 2806 int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm, 2807 u32 slot_count) 2808 { 2809 int ret; 2810 2811 /* Acquire the VM lock and reserve a slot for this GPU job. */ 2812 ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count); 2813 if (ret) 2814 return ret; 2815 2816 return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count); 2817 } 2818 2819 /** 2820 * panthor_mmu_unplug() - Unplug the MMU logic 2821 * @ptdev: Device. 2822 * 2823 * No access to the MMU regs should be done after this function is called. 2824 * We suspend the IRQ and disable all VMs to guarantee that. 2825 */ 2826 void panthor_mmu_unplug(struct panthor_device *ptdev) 2827 { 2828 if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev)) 2829 panthor_mmu_irq_suspend(&ptdev->mmu->irq); 2830 2831 mutex_lock(&ptdev->mmu->as.slots_lock); 2832 for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) { 2833 struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm; 2834 2835 if (vm) { 2836 drm_WARN_ON(&ptdev->base, 2837 panthor_mmu_as_disable(ptdev, i, false)); 2838 panthor_vm_release_as_locked(vm); 2839 } 2840 } 2841 mutex_unlock(&ptdev->mmu->as.slots_lock); 2842 } 2843 2844 static void panthor_mmu_release_wq(struct drm_device *ddev, void *res) 2845 { 2846 destroy_workqueue(res); 2847 } 2848 2849 /** 2850 * panthor_mmu_init() - Initialize the MMU logic. 2851 * @ptdev: Device. 2852 * 2853 * Return: 0 on success, a negative error code otherwise. 2854 */ 2855 int panthor_mmu_init(struct panthor_device *ptdev) 2856 { 2857 u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features); 2858 struct panthor_mmu *mmu; 2859 int ret, irq; 2860 2861 mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL); 2862 if (!mmu) 2863 return -ENOMEM; 2864 2865 INIT_LIST_HEAD(&mmu->as.lru_list); 2866 2867 ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock); 2868 if (ret) 2869 return ret; 2870 2871 INIT_LIST_HEAD(&mmu->vm.list); 2872 ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock); 2873 if (ret) 2874 return ret; 2875 2876 ptdev->mmu = mmu; 2877 2878 irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu"); 2879 if (irq <= 0) 2880 return -ENODEV; 2881 2882 ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq, 2883 panthor_mmu_fault_mask(ptdev, ~0)); 2884 if (ret) 2885 return ret; 2886 2887 mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0); 2888 if (!mmu->vm.wq) 2889 return -ENOMEM; 2890 2891 /* On 32-bit kernels, the VA space is limited by the io_pgtable_ops abstraction, 2892 * which passes iova as an unsigned long. Patch the mmu_features to reflect this 2893 * limitation. 
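 * For example, a GPU reporting 48 VA bits is clamped to 32 bits when BITS_PER_LONG is 32.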
2894 */ 2895 if (va_bits > BITS_PER_LONG) { 2896 ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0); 2897 ptdev->gpu_info.mmu_features |= BITS_PER_LONG; 2898 } 2899 2900 return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq); 2901 } 2902 2903 #ifdef CONFIG_DEBUG_FS 2904 static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m) 2905 { 2906 int ret; 2907 2908 mutex_lock(&vm->op_lock); 2909 ret = drm_debugfs_gpuva_info(m, &vm->base); 2910 mutex_unlock(&vm->op_lock); 2911 2912 return ret; 2913 } 2914 2915 static int show_each_vm(struct seq_file *m, void *arg) 2916 { 2917 struct drm_info_node *node = (struct drm_info_node *)m->private; 2918 struct drm_device *ddev = node->minor->dev; 2919 struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base); 2920 int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data; 2921 struct panthor_vm *vm; 2922 int ret = 0; 2923 2924 mutex_lock(&ptdev->mmu->vm.lock); 2925 list_for_each_entry(vm, &ptdev->mmu->vm.list, node) { 2926 ret = show(vm, m); 2927 if (ret < 0) 2928 break; 2929 2930 seq_puts(m, "\n"); 2931 } 2932 mutex_unlock(&ptdev->mmu->vm.lock); 2933 2934 return ret; 2935 } 2936 2937 static struct drm_info_list panthor_mmu_debugfs_list[] = { 2938 DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas), 2939 }; 2940 2941 /** 2942 * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries 2943 * @minor: Minor. 2944 */ 2945 void panthor_mmu_debugfs_init(struct drm_minor *minor) 2946 { 2947 drm_debugfs_create_files(panthor_mmu_debugfs_list, 2948 ARRAY_SIZE(panthor_mmu_debugfs_list), 2949 minor->debugfs_root, minor); 2950 } 2951 #endif /* CONFIG_DEBUG_FS */ 2952 2953 /** 2954 * panthor_mmu_pt_cache_init() - Initialize the page table cache. 2955 * 2956 * Return: 0 on success, a negative error code otherwise. 2957 */ 2958 int panthor_mmu_pt_cache_init(void) 2959 { 2960 pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL); 2961 if (!pt_cache) 2962 return -ENOMEM; 2963 2964 return 0; 2965 } 2966 2967 /** 2968 * panthor_mmu_pt_cache_fini() - Destroy the page table cache. 2969 */ 2970 void panthor_mmu_pt_cache_fini(void) 2971 { 2972 kmem_cache_destroy(pt_cache); 2973 } 2974
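/*
 * Illustrative sketch (not part of the driver): pt_cache is a file-scoped
 * kmem_cache shared by all panthor devices, so the two helpers above are
 * expected to be paired once in the module init/exit path, roughly as follows
 * (the function names below are made up for the example):
 *
 *	static int __init panthor_module_init(void)
 *	{
 *		int ret = panthor_mmu_pt_cache_init();
 *
 *		if (ret)
 *			return ret;
 *
 *		return platform_driver_register(...);
 *	}
 *
 *	static void __exit panthor_module_exit(void)
 *	{
 *		platform_driver_unregister(...);
 *		panthor_mmu_pt_cache_fini();
 *	}
 */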