// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <drm/drm_debugfs.h>
#include <drm/drm_drv.h>
#include <drm/drm_exec.h>
#include <drm/drm_gpuvm.h>
#include <drm/drm_managed.h>
#include <drm/gpu_scheduler.h>
#include <drm/panthor_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kmemleak.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panthor_device.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_heap.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

#define MAX_AS_SLOTS	32

struct panthor_vm;

/**
 * struct panthor_as_slot - Address space slot
 */
struct panthor_as_slot {
	/** @vm: VM bound to this slot. NULL if no VM is bound. */
	struct panthor_vm *vm;
};

/**
 * struct panthor_mmu - MMU related data
 */
struct panthor_mmu {
	/** @irq: The MMU irq. */
	struct panthor_irq irq;

	/**
	 * @as: Address space related fields.
	 *
	 * The GPU has a limited number of address space (AS) slots, forcing
	 * us to re-assign them on-demand.
	 */
	struct {
		/** @as.slots_lock: Lock protecting access to all other AS fields. */
		struct mutex slots_lock;

		/** @as.alloc_mask: Bitmask encoding the allocated slots. */
		unsigned long alloc_mask;

		/** @as.faulty_mask: Bitmask encoding the faulty slots. */
		unsigned long faulty_mask;

		/** @as.slots: VMs currently bound to the AS slots. */
		struct panthor_as_slot slots[MAX_AS_SLOTS];

		/**
		 * @as.lru_list: List of least recently used VMs.
		 *
		 * We use this list to pick a VM to evict when all slots are
		 * used.
		 *
		 * There should be no more active VMs than there are AS slots,
		 * so this LRU is just here to keep VMs bound until there's
		 * a need to release a slot, thus avoiding unnecessary
		 * TLB/cache flushes.
		 */
		struct list_head lru_list;
	} as;

	/** @vm: VMs management fields */
	struct {
		/** @vm.lock: Lock protecting access to list. */
		struct mutex lock;

		/** @vm.list: List containing all VMs. */
		struct list_head list;

		/** @vm.reset_in_progress: True if a reset is in progress. */
		bool reset_in_progress;

		/** @vm.wq: Workqueue used for the VM_BIND queues. */
		struct workqueue_struct *wq;
	} vm;
};

/**
 * struct panthor_vm_pool - VM pool object
 */
struct panthor_vm_pool {
	/** @xa: Array used for VM handle tracking. */
	struct xarray xa;
};

/**
 * struct panthor_vma - GPU mapping object
 *
 * This is used to track GEM mappings in GPU space.
 */
struct panthor_vma {
	/** @base: Inherits from drm_gpuva. */
	struct drm_gpuva base;

	/** @node: Used to implement deferred release of VMAs. */
	struct list_head node;

	/**
	 * @flags: Combination of drm_panthor_vm_bind_op_flags.
	 *
	 * Only map related flags are accepted.
	 */
	u32 flags;
};

/**
 * struct panthor_vm_op_ctx - VM operation context
 *
 * With VM operations potentially taking place in a dma-signaling path, we
 * need to make sure everything that might require resource allocation is
 * pre-allocated upfront. This is what this operation context is for.
 *
 * We also collect resources that have been freed, so we can release them
 * asynchronously, and let the VM_BIND scheduler process the next VM_BIND
 * request.
 */
struct panthor_vm_op_ctx {
	/** @rsvd_page_tables: Pages reserved for the MMU page table update. */
	struct {
		/** @rsvd_page_tables.count: Number of pages reserved. */
		u32 count;

		/** @rsvd_page_tables.ptr: Points to the first unused page in the @pages table. */
		u32 ptr;

		/**
		 * @rsvd_page_tables.pages: Array of pages to be used for an MMU page table update.
		 *
		 * After a VM operation, there might be free pages left in this array.
		 * They should be returned to the pt_cache as part of the op_ctx cleanup.
		 */
		void **pages;
	} rsvd_page_tables;

	/**
	 * @preallocated_vmas: Pre-allocated VMAs to handle the remap case.
	 *
	 * Partial unmap requests or map requests overlapping existing mappings will
	 * trigger a remap call, which needs to register up to three panthor_vma objects
	 * (one for the new mapping, and two for the previous and next mappings).
	 */
	struct panthor_vma *preallocated_vmas[3];

	/** @flags: Combination of drm_panthor_vm_bind_op_flags. */
	u32 flags;

	/** @va: Virtual range targeted by the VM operation. */
	struct {
		/** @va.addr: Start address. */
		u64 addr;

		/** @va.range: Range size. */
		u64 range;
	} va;

	/**
	 * @returned_vmas: List of panthor_vma objects returned after a VM operation.
	 *
	 * For unmap operations, this will contain all VMAs that were covered by the
	 * specified VA range.
	 *
	 * For map operations, this will contain all VMAs that previously mapped to
	 * the specified VA range.
	 *
	 * Those VMAs, and the resources they point to, will be released as part of
	 * the op_ctx cleanup operation.
	 */
	struct list_head returned_vmas;

	/** @map: Fields specific to a map operation. */
	struct {
		/** @map.vm_bo: Buffer object to map. */
		struct drm_gpuvm_bo *vm_bo;

		/** @map.bo_offset: Offset in the buffer object. */
		u64 bo_offset;

		/**
		 * @map.sgt: sg-table pointing to pages backing the GEM object.
		 *
		 * This is gathered at job creation time, such that we don't have
		 * to allocate in ::run_job().
		 */
		struct sg_table *sgt;

		/**
		 * @map.new_vma: The new VMA object that will be inserted to the VA tree.
		 */
		struct panthor_vma *new_vma;
	} map;
};

/**
 * struct panthor_vm - VM object
 *
 * A VM is an object representing a GPU (or MCU) virtual address space.
 * It embeds the MMU page table for this address space, a tree containing
 * all the virtual mappings of GEM objects, and other things needed to manage
 * the VM.
 *
 * Except for the MCU VM, which is managed by the kernel, all other VMs are
 * created by userspace and mostly managed by userspace, using the
 * %DRM_IOCTL_PANTHOR_VM_BIND ioctl.
 *
 * A portion of the virtual address space is reserved for kernel objects,
 * like heap chunks, and userspace gets to decide how much of the virtual
 * address space is left to the kernel (half of the virtual address space
 * by default).
 */
struct panthor_vm {
	/**
	 * @base: Inherit from drm_gpuvm.
	 *
	 * We delegate all the VA management to the common drm_gpuvm framework
	 * and only implement hooks to update the MMU page table.
	 */
	struct drm_gpuvm base;

	/**
	 * @sched: Scheduler used for asynchronous VM_BIND requests.
	 *
	 * We use a 1:1 scheduler here.
	 */
	struct drm_gpu_scheduler sched;

	/**
	 * @entity: Scheduling entity representing the VM_BIND queue.
	 *
	 * There's currently one bind queue per VM. It doesn't make sense to
	 * allow more given the VM operations are serialized anyway.
	 */
	struct drm_sched_entity entity;

	/** @ptdev: Device. */
	struct panthor_device *ptdev;

	/** @memattr: Value to program to the AS_MEMATTR register. */
	u64 memattr;

	/** @pgtbl_ops: Page table operations. */
	struct io_pgtable_ops *pgtbl_ops;

	/** @root_page_table: Stores the root page table pointer. */
	void *root_page_table;

	/**
	 * @op_lock: Lock used to serialize operations on a VM.
	 *
	 * The serialization of jobs queued to the VM_BIND queue is already
	 * taken care of by drm_sched, but we need to serialize synchronous
	 * and asynchronous VM_BIND requests. This is what this lock is for.
	 */
	struct mutex op_lock;

	/**
	 * @op_ctx: The context attached to the currently executing VM operation.
	 *
	 * NULL when no operation is in progress.
	 */
	struct panthor_vm_op_ctx *op_ctx;

	/**
	 * @mm: Memory management object representing the auto-VA/kernel-VA.
	 *
	 * Used to auto-allocate VA space for kernel-managed objects (tiler
	 * heaps, ...).
	 *
	 * For the MCU VM, this is managing the VA range that's used to map
	 * all shared interfaces.
	 *
	 * For user VMs, the range is specified by userspace, and must not
	 * exceed half of the addressable VA space.
	 */
	struct drm_mm mm;

	/** @mm_lock: Lock protecting the @mm field. */
	struct mutex mm_lock;

	/** @kernel_auto_va: Automatic VA-range for kernel BOs. */
	struct {
		/** @kernel_auto_va.start: Start of the automatic VA-range for kernel BOs. */
		u64 start;

		/** @kernel_auto_va.end: End of the automatic VA-range for kernel BOs. */
		u64 end;
	} kernel_auto_va;

	/** @as: Address space related fields. */
	struct {
		/**
		 * @as.id: ID of the address space this VM is bound to.
		 *
		 * A value of -1 means the VM is inactive/not bound.
		 */
		int id;

		/** @as.active_cnt: Number of active users of this VM. */
		refcount_t active_cnt;

		/**
		 * @as.lru_node: Used to insert the VM in the panthor_mmu::as::lru_list.
		 *
		 * Active VMs should not be inserted in the LRU list.
		 */
		struct list_head lru_node;
	} as;

	/**
	 * @heaps: Tiler heap related fields.
	 */
	struct {
		/**
		 * @heaps.pool: The heap pool attached to this VM.
		 *
		 * Will stay NULL until someone creates a heap context on this VM.
		 */
		struct panthor_heap_pool *pool;

		/** @heaps.lock: Lock used to protect access to @pool. */
		struct mutex lock;
	} heaps;

	/** @node: Used to insert the VM in the panthor_mmu::vm::list. */
	struct list_head node;

	/** @for_mcu: True if this is the MCU VM. */
	bool for_mcu;

	/**
	 * @destroyed: True if the VM was destroyed.
	 *
	 * No further bind requests should be queued to a destroyed VM.
	 */
	bool destroyed;

	/**
	 * @unusable: True if the VM has turned unusable because something
	 * bad happened during an asynchronous request.
	 *
	 * We don't try to recover from such failures, because this implies
	 * informing userspace about the specific operation that failed, and
	 * hoping the userspace driver can replay things from there. This all
	 * sounds very complicated for little gain.
	 *
	 * Instead, we should just flag the VM as unusable, and fail any
	 * further request targeting this VM.
	 *
	 * We also provide a way to query a VM state, so userspace can destroy
	 * it and create a new one.
	 *
	 * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST
	 * situation, where the logical device needs to be re-created.
	 */
	bool unusable;

	/**
	 * @unhandled_fault: Unhandled fault happened.
	 *
	 * This should be reported to the scheduler, and the queue/group be
	 * flagged as faulty as a result.
	 */
	bool unhandled_fault;
};

/**
 * struct panthor_vm_bind_job - VM bind job
 */
struct panthor_vm_bind_job {
	/** @base: Inherit from drm_sched_job. */
	struct drm_sched_job base;

	/** @refcount: Reference count. */
	struct kref refcount;

	/** @cleanup_op_ctx_work: Work used to cleanup the VM operation context. */
	struct work_struct cleanup_op_ctx_work;

	/** @vm: VM targeted by the VM operation. */
	struct panthor_vm *vm;

	/** @ctx: Operation context. */
	struct panthor_vm_op_ctx ctx;
};

/*
 * @pt_cache: Cache used to allocate MMU page tables.
 *
 * The pre-allocation pattern forces us to over-allocate to plan for
 * the worst case scenario, and return the pages we didn't use.
 *
 * Having a kmem_cache allows us to speed up allocations.
 */
static struct kmem_cache *pt_cache;

/**
 * alloc_pt() - Custom page table allocator
 * @cookie: Cookie passed at page table allocation time.
 * @size: Size of the page table. This size should be fixed,
 * and determined at creation time based on the granule size.
 * @gfp: GFP flags.
 *
 * We want a custom allocator so we can use a cache for page table
 * allocations and amortize the cost of the over-reservation that's
 * done to allow asynchronous VM operations.
 *
 * Return: non-NULL on success, NULL if the allocation failed for any
 * reason.
 */
static void *alloc_pt(void *cookie, size_t size, gfp_t gfp)
{
	struct panthor_vm *vm = cookie;
	void *page;

	/* Allocation of the root page table happening during init. */
	if (unlikely(!vm->root_page_table)) {
		struct page *p;

		drm_WARN_ON(&vm->ptdev->base, vm->op_ctx);
		p = alloc_pages_node(dev_to_node(vm->ptdev->base.dev),
				     gfp | __GFP_ZERO, get_order(size));
		page = p ? page_address(p) : NULL;
		vm->root_page_table = page;
		return page;
	}

	/* We're not supposed to have anything bigger than 4k here, because we picked a
	 * 4k granule size at init time.
	 */
	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
		return NULL;

	/* We must have some op_ctx attached to the VM and it must have at least one
	 * free page.
	 */
	if (drm_WARN_ON(&vm->ptdev->base, !vm->op_ctx) ||
	    drm_WARN_ON(&vm->ptdev->base,
			vm->op_ctx->rsvd_page_tables.ptr >= vm->op_ctx->rsvd_page_tables.count))
		return NULL;

	page = vm->op_ctx->rsvd_page_tables.pages[vm->op_ctx->rsvd_page_tables.ptr++];
	memset(page, 0, SZ_4K);

	/* Page table entries don't use virtual addresses, which trips out
	 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
	 * are mixed with other fields, and I fear kmemleak won't detect that
	 * either.
	 *
	 * Let's just ignore memory passed to the page-table driver for now.
	 */
	kmemleak_ignore(page);
	return page;
}
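
/*
 * Note that once the root table exists, alloc_pt() never allocates in the
 * map/unmap path: it only pops pages that panthor_vm_prepare_map_op_ctx()
 * or panthor_vm_prepare_unmap_op_ctx() reserved from pt_cache ahead of
 * time, which is what keeps io-pgtable updates allocation-free in the
 * dma-signaling path.
 */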

/**
 * free_pt() - Custom page table free function
 * @cookie: Cookie passed at page table allocation time.
 * @data: Page table to free.
 * @size: Size of the page table. This size should be fixed,
 * and determined at creation time based on the granule size.
 */
static void free_pt(void *cookie, void *data, size_t size)
{
	struct panthor_vm *vm = cookie;

	if (unlikely(vm->root_page_table == data)) {
		free_pages((unsigned long)data, get_order(size));
		vm->root_page_table = NULL;
		return;
	}

	if (drm_WARN_ON(&vm->ptdev->base, size != SZ_4K))
		return;

	/* Return the page to the pt_cache. */
	kmem_cache_free(pt_cache, data);
}

static int wait_ready(struct panthor_device *ptdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending.
	 */
	ret = gpu_read_relaxed_poll_timeout_atomic(ptdev, AS_STATUS(as_nr), val,
						   !(val & AS_STATUS_AS_ACTIVE),
						   10, 100000);

	if (ret) {
		panthor_device_schedule_reset(ptdev);
		drm_err(&ptdev->base, "AS_ACTIVE bit stuck\n");
	}

	return ret;
}

static int write_cmd(struct panthor_device *ptdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(ptdev, as_nr);
	if (!status)
		gpu_write(ptdev, AS_COMMAND(as_nr), cmd);

	return status;
}

static void lock_region(struct panthor_device *ptdev, u32 as_nr,
			u64 region_start, u64 size)
{
	u8 region_width;
	u64 region;
	u64 region_end = region_start + size;

	if (!size)
		return;

	/*
	 * The locked region is a naturally aligned power of 2 block encoded
	 * as log2 minus 1.
	 * Calculate the desired start/end and look for the highest bit which
	 * differs. The smallest naturally aligned block must include this bit
	 * change, the desired region starts with this bit (and subsequent bits)
	 * zeroed and ends with the bit (and subsequent bits) set to one.
	 */
	region_width = max(fls64(region_start ^ (region_end - 1)),
			   const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;

	/*
	 * Mask off the low bits of region_start (which would be ignored by
	 * the hardware anyway).
	 */
	region_start &= GENMASK_ULL(63, region_width);

	region = region_width | region_start;

	/* Lock the region that needs to be updated */
	gpu_write64(ptdev, AS_LOCKADDR(as_nr), region);
	write_cmd(ptdev, as_nr, AS_COMMAND_LOCK);
}
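
/*
 * Worked example, assuming the request is above AS_LOCK_REGION_MIN_SIZE:
 * locking region_start = 0x200000 with size = SZ_2M gives
 * region_end - 1 = 0x3fffff, fls64(0x200000 ^ 0x3fffff) = fls64(0x1fffff)
 * = 21, so region_width = 20, i.e. a naturally aligned 2^21 = 2M block.
 * region_start already sits on a 2M boundary, so AS_LOCKADDR ends up
 * programmed with 0x200000 | 20.
 */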
555 */ 556 region_width = max(fls64(region_start ^ (region_end - 1)), 557 const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1; 558 559 /* 560 * Mask off the low bits of region_start (which would be ignored by 561 * the hardware anyway) 562 */ 563 region_start &= GENMASK_ULL(63, region_width); 564 565 region = region_width | region_start; 566 567 /* Lock the region that needs to be updated */ 568 gpu_write64(ptdev, AS_LOCKADDR(as_nr), region); 569 write_cmd(ptdev, as_nr, AS_COMMAND_LOCK); 570 } 571 572 static int mmu_hw_do_flush_on_gpu_ctrl(struct panthor_device *ptdev, int as_nr, 573 u32 op) 574 { 575 const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV; 576 u32 lsc_flush_op = 0; 577 int ret; 578 579 if (op == AS_COMMAND_FLUSH_MEM) 580 lsc_flush_op = CACHE_CLEAN | CACHE_INV; 581 582 ret = wait_ready(ptdev, as_nr); 583 if (ret) 584 return ret; 585 586 ret = panthor_gpu_flush_caches(ptdev, l2_flush_op, lsc_flush_op, 0); 587 if (ret) 588 return ret; 589 590 /* 591 * Explicitly unlock the region as the AS is not unlocked automatically 592 * at the end of the GPU_CONTROL cache flush command, unlike 593 * AS_COMMAND_FLUSH_MEM or AS_COMMAND_FLUSH_PT. 594 */ 595 write_cmd(ptdev, as_nr, AS_COMMAND_UNLOCK); 596 597 /* Wait for the unlock command to complete */ 598 return wait_ready(ptdev, as_nr); 599 } 600 601 static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, 602 u64 iova, u64 size, u32 op) 603 { 604 lockdep_assert_held(&ptdev->mmu->as.slots_lock); 605 606 if (as_nr < 0) 607 return 0; 608 609 /* 610 * If the AS number is greater than zero, then we can be sure 611 * the device is up and running, so we don't need to explicitly 612 * power it up 613 */ 614 615 if (op != AS_COMMAND_UNLOCK) 616 lock_region(ptdev, as_nr, iova, size); 617 618 if (op == AS_COMMAND_FLUSH_MEM || op == AS_COMMAND_FLUSH_PT) 619 return mmu_hw_do_flush_on_gpu_ctrl(ptdev, as_nr, op); 620 621 /* Run the MMU operation */ 622 write_cmd(ptdev, as_nr, op); 623 624 /* Wait for the flush to complete */ 625 return wait_ready(ptdev, as_nr); 626 } 627 628 static int mmu_hw_do_operation(struct panthor_vm *vm, 629 u64 iova, u64 size, u32 op) 630 { 631 struct panthor_device *ptdev = vm->ptdev; 632 int ret; 633 634 mutex_lock(&ptdev->mmu->as.slots_lock); 635 ret = mmu_hw_do_operation_locked(ptdev, vm->as.id, iova, size, op); 636 mutex_unlock(&ptdev->mmu->as.slots_lock); 637 638 return ret; 639 } 640 641 static int panthor_mmu_as_enable(struct panthor_device *ptdev, u32 as_nr, 642 u64 transtab, u64 transcfg, u64 memattr) 643 { 644 int ret; 645 646 ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM); 647 if (ret) 648 return ret; 649 650 gpu_write64(ptdev, AS_TRANSTAB(as_nr), transtab); 651 gpu_write64(ptdev, AS_MEMATTR(as_nr), memattr); 652 gpu_write64(ptdev, AS_TRANSCFG(as_nr), transcfg); 653 654 return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE); 655 } 656 657 static int panthor_mmu_as_disable(struct panthor_device *ptdev, u32 as_nr) 658 { 659 int ret; 660 661 ret = mmu_hw_do_operation_locked(ptdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM); 662 if (ret) 663 return ret; 664 665 gpu_write64(ptdev, AS_TRANSTAB(as_nr), 0); 666 gpu_write64(ptdev, AS_MEMATTR(as_nr), 0); 667 gpu_write64(ptdev, AS_TRANSCFG(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED); 668 669 return write_cmd(ptdev, as_nr, AS_COMMAND_UPDATE); 670 } 671 672 static u32 panthor_mmu_fault_mask(struct panthor_device *ptdev, u32 value) 673 { 674 /* Bits 16 to 31 mean REQ_COMPLETE. 

static u32 panthor_mmu_as_fault_mask(struct panthor_device *ptdev, u32 as)
{
	return BIT(as);
}

/**
 * panthor_vm_has_unhandled_faults() - Check if a VM has unhandled faults
 * @vm: VM to check.
 *
 * Return: true if the VM has unhandled faults, false otherwise.
 */
bool panthor_vm_has_unhandled_faults(struct panthor_vm *vm)
{
	return vm->unhandled_fault;
}

/**
 * panthor_vm_is_unusable() - Check if the VM is still usable
 * @vm: VM to check.
 *
 * Return: true if the VM is unusable, false otherwise.
 */
bool panthor_vm_is_unusable(struct panthor_vm *vm)
{
	return vm->unusable;
}

static void panthor_vm_release_as_locked(struct panthor_vm *vm)
{
	struct panthor_device *ptdev = vm->ptdev;

	lockdep_assert_held(&ptdev->mmu->as.slots_lock);

	if (drm_WARN_ON(&ptdev->base, vm->as.id < 0))
		return;

	ptdev->mmu->as.slots[vm->as.id].vm = NULL;
	clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
	refcount_set(&vm->as.active_cnt, 0);
	list_del_init(&vm->as.lru_node);
	vm->as.id = -1;
}

/**
 * panthor_vm_active() - Flag a VM as active
 * @vm: VM to flag as active.
 *
 * Assigns an address space to a VM so it can be used by the GPU/MCU.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_active(struct panthor_vm *vm)
{
	struct panthor_device *ptdev = vm->ptdev;
	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
	struct io_pgtable_cfg *cfg = &io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg;
	int ret = 0, as, cookie;
	u64 transtab, transcfg;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return -ENODEV;

	if (refcount_inc_not_zero(&vm->as.active_cnt))
		goto out_dev_exit;

	mutex_lock(&ptdev->mmu->as.slots_lock);

	if (refcount_inc_not_zero(&vm->as.active_cnt))
		goto out_unlock;

	as = vm->as.id;
	if (as >= 0) {
		/* Unhandled pagefault on this AS, the MMU was disabled. We need to
		 * re-enable the MMU after clearing+unmasking the AS interrupts.
		 */
		if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as))
			goto out_enable_as;

		goto out_make_active;
	}

	/* Check for a free AS */
	if (vm->for_mcu) {
		drm_WARN_ON(&ptdev->base, ptdev->mmu->as.alloc_mask & BIT(0));
		as = 0;
	} else {
		as = ffz(ptdev->mmu->as.alloc_mask | BIT(0));
	}

	if (!(BIT(as) & ptdev->gpu_info.as_present)) {
		struct panthor_vm *lru_vm;

		lru_vm = list_first_entry_or_null(&ptdev->mmu->as.lru_list,
						  struct panthor_vm,
						  as.lru_node);
		if (drm_WARN_ON(&ptdev->base, !lru_vm)) {
			ret = -EBUSY;
			goto out_unlock;
		}

		drm_WARN_ON(&ptdev->base, refcount_read(&lru_vm->as.active_cnt));
		as = lru_vm->as.id;
		panthor_vm_release_as_locked(lru_vm);
	}

	/* Assign the free or reclaimed AS to the FD */
	vm->as.id = as;
	set_bit(as, &ptdev->mmu->as.alloc_mask);
	ptdev->mmu->as.slots[as].vm = vm;

out_enable_as:
	transtab = cfg->arm_lpae_s1_cfg.ttbr;
	transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
		   AS_TRANSCFG_PTW_RA |
		   AS_TRANSCFG_ADRMODE_AARCH64_4K |
		   AS_TRANSCFG_INA_BITS(55 - va_bits);
	if (ptdev->coherent)
		transcfg |= AS_TRANSCFG_PTW_SH_OS;

	/* If the VM is re-activated, we clear the fault. */
	vm->unhandled_fault = false;

	/* Unhandled pagefault on this AS, clear the fault and re-enable interrupts
	 * before enabling the AS.
	 */
	if (ptdev->mmu->as.faulty_mask & panthor_mmu_as_fault_mask(ptdev, as)) {
		gpu_write(ptdev, MMU_INT_CLEAR, panthor_mmu_as_fault_mask(ptdev, as));
		ptdev->mmu->as.faulty_mask &= ~panthor_mmu_as_fault_mask(ptdev, as);
		ptdev->mmu->irq.mask |= panthor_mmu_as_fault_mask(ptdev, as);
		gpu_write(ptdev, MMU_INT_MASK, ~ptdev->mmu->as.faulty_mask);
	}

	ret = panthor_mmu_as_enable(vm->ptdev, vm->as.id, transtab, transcfg, vm->memattr);

out_make_active:
	if (!ret) {
		refcount_set(&vm->as.active_cnt, 1);
		list_del_init(&vm->as.lru_node);
	}

out_unlock:
	mutex_unlock(&ptdev->mmu->as.slots_lock);

out_dev_exit:
	drm_dev_exit(cookie);
	return ret;
}
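
/*
 * A minimal usage sketch (hypothetical call site): GPU activity is
 * expected to be bracketed by an active/idle pair:
 *
 *	ret = panthor_vm_active(vm);
 *	if (ret)
 *		return ret;
 *
 *	// queue work referencing panthor_vm_as(vm)
 *
 *	panthor_vm_idle(vm);
 *
 * Nesting is fine: only the first activation grabs an AS slot
 * (as.active_cnt is a refcount), and the slot only goes back on the LRU
 * list once the count drops to zero in panthor_vm_idle().
 */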

/**
 * panthor_vm_idle() - Flag a VM idle
 * @vm: VM to flag as idle.
 *
 * When we know the GPU is done with the VM (no more jobs to process),
 * we can relinquish the AS slot attached to this VM, if any.
 *
 * We don't release the slot immediately, but instead place the VM in
 * the LRU list, so it can be evicted if another VM needs an AS slot.
 * This way, VMs stay attached to the AS they were given until we run
 * out of free slots, limiting the number of MMU operations (TLB flush
 * and other AS updates).
 */
void panthor_vm_idle(struct panthor_vm *vm)
{
	struct panthor_device *ptdev = vm->ptdev;

	if (!refcount_dec_and_mutex_lock(&vm->as.active_cnt, &ptdev->mmu->as.slots_lock))
		return;

	if (!drm_WARN_ON(&ptdev->base, vm->as.id == -1 || !list_empty(&vm->as.lru_node)))
		list_add_tail(&vm->as.lru_node, &ptdev->mmu->as.lru_list);

	refcount_set(&vm->as.active_cnt, 0);
	mutex_unlock(&ptdev->mmu->as.slots_lock);
}

u32 panthor_vm_page_size(struct panthor_vm *vm)
{
	const struct io_pgtable *pgt = io_pgtable_ops_to_pgtable(vm->pgtbl_ops);
	u32 pg_shift = ffs(pgt->cfg.pgsize_bitmap) - 1;

	return 1u << pg_shift;
}

static void panthor_vm_stop(struct panthor_vm *vm)
{
	drm_sched_stop(&vm->sched, NULL);
}

static void panthor_vm_start(struct panthor_vm *vm)
{
	drm_sched_start(&vm->sched, 0);
}

/**
 * panthor_vm_as() - Get the AS slot attached to a VM
 * @vm: VM to get the AS slot of.
 *
 * Return: -1 if the VM is not assigned an AS slot yet, >= 0 otherwise.
 */
int panthor_vm_as(struct panthor_vm *vm)
{
	return vm->as.id;
}

static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
	/*
	 * io-pgtable only operates on multiple pages within a single table
	 * entry, so we need to split at boundaries of the table size, i.e.
	 * the next block size up. The distance from address A to the next
	 * boundary of block size B is logically B - A % B, but in unsigned
	 * two's complement where B is a power of two we get the equivalence
	 * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
	 */
	size_t blk_offset = -addr % SZ_2M;

	if (blk_offset || size < SZ_2M) {
		*count = min_not_zero(blk_offset, size) / SZ_4K;
		return SZ_4K;
	}
	blk_offset = -addr % SZ_1G ?: SZ_1G;
	*count = min(blk_offset, size) / SZ_2M;
	return SZ_2M;
}
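
/*
 * Worked example: for addr = 0x201000 and size = 4M, successive
 * get_pgsize() calls split the range as 511 * SZ_4K (up to the 0x400000
 * boundary), then 1 * SZ_2M, then a trailing 1 * SZ_4K.
 */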

static int panthor_vm_flush_range(struct panthor_vm *vm, u64 iova, u64 size)
{
	struct panthor_device *ptdev = vm->ptdev;
	int ret = 0, cookie;

	if (vm->as.id < 0)
		return 0;

	/* If the device is unplugged, we just silently skip the flush. */
	if (!drm_dev_enter(&ptdev->base, &cookie))
		return 0;

	ret = mmu_hw_do_operation(vm, iova, size, AS_COMMAND_FLUSH_PT);

	drm_dev_exit(cookie);
	return ret;
}

static int panthor_vm_unmap_pages(struct panthor_vm *vm, u64 iova, u64 size)
{
	struct panthor_device *ptdev = vm->ptdev;
	struct io_pgtable_ops *ops = vm->pgtbl_ops;
	u64 offset = 0;

	drm_dbg(&ptdev->base, "unmap: as=%d, iova=%llx, len=%llx", vm->as.id, iova, size);

	while (offset < size) {
		size_t unmapped_sz = 0, pgcount;
		size_t pgsize = get_pgsize(iova + offset, size - offset, &pgcount);

		unmapped_sz = ops->unmap_pages(ops, iova + offset, pgsize, pgcount, NULL);

		if (drm_WARN_ON(&ptdev->base, unmapped_sz != pgsize * pgcount)) {
			drm_err(&ptdev->base, "failed to unmap range %llx-%llx (requested range %llx-%llx)\n",
				iova + offset + unmapped_sz,
				iova + offset + pgsize * pgcount,
				iova, iova + size);
			panthor_vm_flush_range(vm, iova, offset + unmapped_sz);
			return -EINVAL;
		}
		offset += unmapped_sz;
	}

	return panthor_vm_flush_range(vm, iova, size);
}

static int
panthor_vm_map_pages(struct panthor_vm *vm, u64 iova, int prot,
		     struct sg_table *sgt, u64 offset, u64 size)
{
	struct panthor_device *ptdev = vm->ptdev;
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = vm->pgtbl_ops;
	u64 start_iova = iova;
	int ret;

	if (!size)
		return 0;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		dma_addr_t paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		if (len <= offset) {
			offset -= len;
			continue;
		}

		paddr += offset;
		len -= offset;
		len = min_t(size_t, len, size);
		size -= len;

		drm_dbg(&ptdev->base, "map: as=%d, iova=%llx, paddr=%pad, len=%zx",
			vm->as.id, iova, &paddr, len);

		while (len) {
			size_t pgcount, mapped = 0;
			size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

			ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
					     GFP_KERNEL, &mapped);
			iova += mapped;
			paddr += mapped;
			len -= mapped;

			if (drm_WARN_ON(&ptdev->base, !ret && !mapped))
				ret = -ENOMEM;

			if (ret) {
				/* If something failed, unmap what we've already mapped before
				 * returning. The unmap call is not supposed to fail.
				 */
				drm_WARN_ON(&ptdev->base,
					    panthor_vm_unmap_pages(vm, start_iova,
								   iova - start_iova));
				return ret;
			}
		}

		if (!size)
			break;

		offset = 0;
	}

	return panthor_vm_flush_range(vm, start_iova, iova - start_iova);
}

static int flags_to_prot(u32 flags)
{
	int prot = 0;

	if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC)
		prot |= IOMMU_NOEXEC;

	if (!(flags & DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED))
		prot |= IOMMU_CACHE;

	if (flags & DRM_PANTHOR_VM_BIND_OP_MAP_READONLY)
		prot |= IOMMU_READ;
	else
		prot |= IOMMU_READ | IOMMU_WRITE;

	return prot;
}

/**
 * panthor_vm_alloc_va() - Allocate a region in the auto-va space
 * @vm: VM to allocate a region on.
 * @va: start of the VA range. Can be PANTHOR_VM_KERNEL_AUTO_VA if the user
 * wants the VA to be automatically allocated from the auto-VA range.
 * @size: size of the VA range.
 * @va_node: drm_mm_node to initialize. Must be zero-initialized.
 *
 * Some GPU objects, like heap chunks, are fully managed by the kernel and
 * need to be mapped to the userspace VM, in the region reserved for kernel
 * objects.
 *
 * This function takes care of allocating a region in the kernel auto-VA space.
 *
 * Return: 0 on success, an error code otherwise.
 */
int
panthor_vm_alloc_va(struct panthor_vm *vm, u64 va, u64 size,
		    struct drm_mm_node *va_node)
{
	ssize_t vm_pgsz = panthor_vm_page_size(vm);
	int ret;

	if (!size || !IS_ALIGNED(size, vm_pgsz))
		return -EINVAL;

	if (va != PANTHOR_VM_KERNEL_AUTO_VA && !IS_ALIGNED(va, vm_pgsz))
		return -EINVAL;

	mutex_lock(&vm->mm_lock);
	if (va != PANTHOR_VM_KERNEL_AUTO_VA) {
		va_node->start = va;
		va_node->size = size;
		ret = drm_mm_reserve_node(&vm->mm, va_node);
	} else {
		ret = drm_mm_insert_node_in_range(&vm->mm, va_node, size,
						  size >= SZ_2M ? SZ_2M : SZ_4K,
						  0, vm->kernel_auto_va.start,
						  vm->kernel_auto_va.end,
						  DRM_MM_INSERT_BEST);
	}
	mutex_unlock(&vm->mm_lock);

	return ret;
}
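
/*
 * A minimal usage sketch (error handling elided, hypothetical caller):
 *
 *	struct drm_mm_node va_node = {};
 *
 *	ret = panthor_vm_alloc_va(vm, PANTHOR_VM_KERNEL_AUTO_VA, size,
 *				  &va_node);
 *	...
 *	panthor_vm_free_va(vm, &va_node);
 *
 * Passing PANTHOR_VM_KERNEL_AUTO_VA lets drm_mm pick an address from the
 * kernel auto-VA range instead of reserving a fixed one.
 */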

/**
 * panthor_vm_free_va() - Free a region allocated with panthor_vm_alloc_va()
 * @vm: VM to free the region on.
 * @va_node: Memory node representing the region to free.
 */
void panthor_vm_free_va(struct panthor_vm *vm, struct drm_mm_node *va_node)
{
	mutex_lock(&vm->mm_lock);
	drm_mm_remove_node(va_node);
	mutex_unlock(&vm->mm_lock);
}

static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo)
{
	struct panthor_gem_object *bo = to_panthor_bo(vm_bo->obj);
	struct drm_gpuvm *vm = vm_bo->vm;
	bool unpin;

	/* We must retain the GEM before calling drm_gpuvm_bo_put(),
	 * otherwise the mutex might be destroyed while we hold it.
	 * Same goes for the VM, since we take the VM resv lock.
	 */
	drm_gem_object_get(&bo->base.base);
	drm_gpuvm_get(vm);

	/* We take the resv lock to protect against concurrent accesses to the
	 * gpuvm evicted/extobj lists that are modified in
	 * drm_gpuvm_bo_destroy(), which is called if drm_gpuvm_bo_put()
	 * releases the last vm_bo reference.
	 * We take the BO GPUVA list lock to protect the vm_bo removal from the
	 * GEM vm_bo list.
	 */
	dma_resv_lock(drm_gpuvm_resv(vm), NULL);
	mutex_lock(&bo->gpuva_list_lock);
	unpin = drm_gpuvm_bo_put(vm_bo);
	mutex_unlock(&bo->gpuva_list_lock);
	dma_resv_unlock(drm_gpuvm_resv(vm));

	/* If the vm_bo object was destroyed, release the pin reference that
	 * was held by this object.
	 */
	if (unpin && !drm_gem_is_imported(&bo->base.base))
		drm_gem_shmem_unpin(&bo->base);

	drm_gpuvm_put(vm);
	drm_gem_object_put(&bo->base.base);
}

static void panthor_vm_cleanup_op_ctx(struct panthor_vm_op_ctx *op_ctx,
				      struct panthor_vm *vm)
{
	struct panthor_vma *vma, *tmp_vma;

	u32 remaining_pt_count = op_ctx->rsvd_page_tables.count -
				 op_ctx->rsvd_page_tables.ptr;

	if (remaining_pt_count) {
		kmem_cache_free_bulk(pt_cache, remaining_pt_count,
				     op_ctx->rsvd_page_tables.pages +
				     op_ctx->rsvd_page_tables.ptr);
	}

	kfree(op_ctx->rsvd_page_tables.pages);

	if (op_ctx->map.vm_bo)
		panthor_vm_bo_put(op_ctx->map.vm_bo);

	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++)
		kfree(op_ctx->preallocated_vmas[i]);

	list_for_each_entry_safe(vma, tmp_vma, &op_ctx->returned_vmas, node) {
		list_del(&vma->node);
		panthor_vm_bo_put(vma->base.vm_bo);
		kfree(vma);
	}
}

static struct panthor_vma *
panthor_vm_op_ctx_get_vma(struct panthor_vm_op_ctx *op_ctx)
{
	for (u32 i = 0; i < ARRAY_SIZE(op_ctx->preallocated_vmas); i++) {
		struct panthor_vma *vma = op_ctx->preallocated_vmas[i];

		if (vma) {
			op_ctx->preallocated_vmas[i] = NULL;
			return vma;
		}
	}

	return NULL;
}

static int
panthor_vm_op_ctx_prealloc_vmas(struct panthor_vm_op_ctx *op_ctx)
{
	u32 vma_count;

	switch (op_ctx->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
		/* One VMA for the new mapping, and two more VMAs for the remap case
		 * which might contain both a prev and next VA.
		 */
		vma_count = 3;
		break;

	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
		/* Partial unmaps might trigger a remap with either a prev or a next VA,
		 * but not both.
		 */
		vma_count = 1;
		break;

	default:
		return 0;
	}

	for (u32 i = 0; i < vma_count; i++) {
		struct panthor_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);

		if (!vma)
			return -ENOMEM;

		op_ctx->preallocated_vmas[i] = vma;
	}

	return 0;
}

#define PANTHOR_VM_BIND_OP_MAP_FLAGS \
	(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
	 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
	 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED | \
	 DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)

static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx,
					 struct panthor_vm *vm,
					 struct panthor_gem_object *bo,
					 u64 offset,
					 u64 size, u64 va,
					 u32 flags)
{
	struct drm_gpuvm_bo *preallocated_vm_bo;
	struct sg_table *sgt = NULL;
	u64 pt_count;
	int ret;

	if (!bo)
		return -EINVAL;

	if ((flags & ~PANTHOR_VM_BIND_OP_MAP_FLAGS) ||
	    (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP)
		return -EINVAL;

	/* Make sure the VA and size are aligned and in-bounds. */
	if (size > bo->base.base.size || offset > bo->base.base.size - size)
		return -EINVAL;

	/* If the BO has an exclusive VM attached, it can't be mapped to other VMs. */
	if (bo->exclusive_vm_root_gem &&
	    bo->exclusive_vm_root_gem != panthor_vm_root_gem(vm))
		return -EINVAL;

	memset(op_ctx, 0, sizeof(*op_ctx));
	INIT_LIST_HEAD(&op_ctx->returned_vmas);
	op_ctx->flags = flags;
	op_ctx->va.range = size;
	op_ctx->va.addr = va;

	ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
	if (ret)
		goto err_cleanup;

	if (!drm_gem_is_imported(&bo->base.base)) {
		/* Pre-reserve the BO pages, so the map operation doesn't have to
		 * allocate.
		 */
		ret = drm_gem_shmem_pin(&bo->base);
		if (ret)
			goto err_cleanup;
	}

	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
	if (IS_ERR(sgt)) {
		if (!drm_gem_is_imported(&bo->base.base))
			drm_gem_shmem_unpin(&bo->base);

		ret = PTR_ERR(sgt);
		goto err_cleanup;
	}

	op_ctx->map.sgt = sgt;

	preallocated_vm_bo = drm_gpuvm_bo_create(&vm->base, &bo->base.base);
	if (!preallocated_vm_bo) {
		if (!drm_gem_is_imported(&bo->base.base))
			drm_gem_shmem_unpin(&bo->base);

		ret = -ENOMEM;
		goto err_cleanup;
	}

	/* drm_gpuvm_bo_obtain_prealloc() will call drm_gpuvm_bo_put() on our
	 * pre-allocated BO if the <BO,VM> association exists. Given we
	 * only have one ref on preallocated_vm_bo, drm_gpuvm_bo_destroy() will
	 * be called immediately, and we have to hold the VM resv lock when
	 * calling this function.
	 */
	dma_resv_lock(panthor_vm_resv(vm), NULL);
	mutex_lock(&bo->gpuva_list_lock);
	op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo);
	mutex_unlock(&bo->gpuva_list_lock);
	dma_resv_unlock(panthor_vm_resv(vm));

	/* If a vm_bo for this <VM,BO> combination exists, it already
	 * retains a pin ref, and we can release the one we took earlier.
	 *
	 * If our pre-allocated vm_bo is picked, it now retains the pin ref,
	 * which will be released in panthor_vm_bo_put().
	 */
	if (preallocated_vm_bo != op_ctx->map.vm_bo &&
	    !drm_gem_is_imported(&bo->base.base))
		drm_gem_shmem_unpin(&bo->base);

	op_ctx->map.bo_offset = offset;

	/* L1, L2 and L3 page tables.
	 * We could optimize L3 allocation by iterating over the sgt and merging
	 * 2M contiguous blocks, but it's simpler to over-provision and return
	 * the pages if they're not used.
	 */
	pt_count = ((ALIGN(va + size, 1ull << 39) - ALIGN_DOWN(va, 1ull << 39)) >> 39) +
		   ((ALIGN(va + size, 1ull << 30) - ALIGN_DOWN(va, 1ull << 30)) >> 30) +
		   ((ALIGN(va + size, 1ull << 21) - ALIGN_DOWN(va, 1ull << 21)) >> 21);

	op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
						 sizeof(*op_ctx->rsvd_page_tables.pages),
						 GFP_KERNEL);
	if (!op_ctx->rsvd_page_tables.pages) {
		ret = -ENOMEM;
		goto err_cleanup;
	}

	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
				    op_ctx->rsvd_page_tables.pages);
	op_ctx->rsvd_page_tables.count = ret;
	if (ret != pt_count) {
		ret = -ENOMEM;
		goto err_cleanup;
	}

	/* Insert BO into the extobj list last, when we know nothing can fail. */
	dma_resv_lock(panthor_vm_resv(vm), NULL);
	drm_gpuvm_bo_extobj_add(op_ctx->map.vm_bo);
	dma_resv_unlock(panthor_vm_resv(vm));

	return 0;

err_cleanup:
	panthor_vm_cleanup_op_ctx(op_ctx, vm);
	return ret;
}
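
/*
 * Worked example for the pt_count over-provisioning above: a 2M-aligned,
 * 2M-sized mapping touches exactly one table per level, so
 * pt_count = 1 + 1 + 1 = 3 pages are reserved even if the L1/L2 tables
 * already exist. Whatever alloc_pt() doesn't consume is returned to
 * pt_cache by panthor_vm_cleanup_op_ctx().
 */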

static int panthor_vm_prepare_unmap_op_ctx(struct panthor_vm_op_ctx *op_ctx,
					   struct panthor_vm *vm,
					   u64 va, u64 size)
{
	u32 pt_count = 0;
	int ret;

	memset(op_ctx, 0, sizeof(*op_ctx));
	INIT_LIST_HEAD(&op_ctx->returned_vmas);
	op_ctx->va.range = size;
	op_ctx->va.addr = va;
	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP;

	/* Pre-allocate L3 page tables to account for the split-2M-block
	 * situation on unmap.
	 */
	if (va != ALIGN(va, SZ_2M))
		pt_count++;

	if (va + size != ALIGN(va + size, SZ_2M) &&
	    ALIGN(va + size, SZ_2M) != ALIGN(va, SZ_2M))
		pt_count++;

	ret = panthor_vm_op_ctx_prealloc_vmas(op_ctx);
	if (ret)
		goto err_cleanup;

	if (pt_count) {
		op_ctx->rsvd_page_tables.pages = kcalloc(pt_count,
							 sizeof(*op_ctx->rsvd_page_tables.pages),
							 GFP_KERNEL);
		if (!op_ctx->rsvd_page_tables.pages) {
			ret = -ENOMEM;
			goto err_cleanup;
		}

		ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, pt_count,
					    op_ctx->rsvd_page_tables.pages);
		if (ret != pt_count) {
			ret = -ENOMEM;
			goto err_cleanup;
		}
		op_ctx->rsvd_page_tables.count = pt_count;
	}

	return 0;

err_cleanup:
	panthor_vm_cleanup_op_ctx(op_ctx, vm);
	return ret;
}

static void panthor_vm_prepare_sync_only_op_ctx(struct panthor_vm_op_ctx *op_ctx,
						struct panthor_vm *vm)
{
	memset(op_ctx, 0, sizeof(*op_ctx));
	INIT_LIST_HEAD(&op_ctx->returned_vmas);
	op_ctx->flags = DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY;
}

/**
 * panthor_vm_get_bo_for_va() - Get the GEM object mapped at a virtual address
 * @vm: VM to look into.
 * @va: Virtual address to search for.
 * @bo_offset: Offset of the GEM object mapped at this virtual address.
 * Only valid on success.
 *
 * The object returned by this function might no longer be mapped when the
 * function returns. It's the caller's responsibility to ensure there's no
 * concurrent map/unmap operations making the returned value invalid, or
 * make sure it doesn't matter if the object is no longer mapped.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct panthor_gem_object *
panthor_vm_get_bo_for_va(struct panthor_vm *vm, u64 va, u64 *bo_offset)
{
	struct panthor_gem_object *bo = ERR_PTR(-ENOENT);
	struct drm_gpuva *gpuva;
	struct panthor_vma *vma;

	/* Take the VM lock to prevent concurrent map/unmap operations. */
	mutex_lock(&vm->op_lock);
	gpuva = drm_gpuva_find_first(&vm->base, va, 1);
	vma = gpuva ? container_of(gpuva, struct panthor_vma, base) : NULL;
	if (vma && vma->base.gem.obj) {
		drm_gem_object_get(vma->base.gem.obj);
		bo = to_panthor_bo(vma->base.gem.obj);
		*bo_offset = vma->base.gem.offset + (va - vma->base.va.addr);
	}
	mutex_unlock(&vm->op_lock);

	return bo;
}

#define PANTHOR_VM_MIN_KERNEL_VA_SIZE	SZ_256M

static u64
panthor_vm_create_get_user_va_range(const struct drm_panthor_vm_create *args,
				    u64 full_va_range)
{
	u64 user_va_range;

	/* Make sure we have a minimum amount of VA space for kernel objects. */
	if (full_va_range < PANTHOR_VM_MIN_KERNEL_VA_SIZE)
		return 0;

	if (args->user_va_range) {
		/* Use the user provided value if != 0. */
		user_va_range = args->user_va_range;
	} else if (TASK_SIZE_OF(current) < full_va_range) {
		/* If the task VM size is smaller than the GPU VA range, pick this
		 * as our default user VA range, so userspace can CPU/GPU map buffers
		 * at the same address.
		 */
		user_va_range = TASK_SIZE_OF(current);
	} else {
		/* If the GPU VA range is smaller than the task VM size, we
		 * just have to live with the fact we won't be able to map
		 * all buffers at the same GPU/CPU address.
		 *
		 * If the GPU VA range is bigger than 4G (more than 32-bit of
		 * VA), we split the range in two, and assign half of it to
		 * the user and the other half to the kernel. If it's not, we
		 * keep the kernel VA space as small as possible.
		 */
		user_va_range = full_va_range > SZ_4G ?
				full_va_range / 2 :
				full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;
	}

	if (full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE < user_va_range)
		user_va_range = full_va_range - PANTHOR_VM_MIN_KERNEL_VA_SIZE;

	return user_va_range;
}
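
/*
 * Worked example: with a 48-bit GPU VA space (full_va_range = 2^48) and
 * args->user_va_range == 0, a 64-bit task whose TASK_SIZE is below 2^48
 * gets user_va_range = TASK_SIZE. On a hypothetical 8G (33-bit) VA space,
 * the range would instead be split in half: 4G for userspace, 4G for the
 * kernel.
 */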

#define PANTHOR_VM_CREATE_FLAGS		0

static int
panthor_vm_create_check_args(const struct panthor_device *ptdev,
			     const struct drm_panthor_vm_create *args,
			     u64 *kernel_va_start, u64 *kernel_va_range)
{
	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
	u64 full_va_range = 1ull << va_bits;
	u64 user_va_range;

	if (args->flags & ~PANTHOR_VM_CREATE_FLAGS)
		return -EINVAL;

	user_va_range = panthor_vm_create_get_user_va_range(args, full_va_range);
	if (!user_va_range || (args->user_va_range && args->user_va_range > user_va_range))
		return -EINVAL;

	/* Pick a kernel VA range that's a power of two, to have a clear split. */
	*kernel_va_range = rounddown_pow_of_two(full_va_range - user_va_range);
	*kernel_va_start = full_va_range - *kernel_va_range;
	return 0;
}

/*
 * Only 32 VMs per open file. If that becomes a limiting factor, we can
 * increase this number.
 */
#define PANTHOR_MAX_VMS_PER_FILE	32

/**
 * panthor_vm_pool_create_vm() - Create a VM
 * @ptdev: The panthor device
 * @pool: The VM pool to create this VM in.
 * @args: VM creation args.
 *
 * Return: a positive VM ID on success, a negative error code otherwise.
 */
int panthor_vm_pool_create_vm(struct panthor_device *ptdev,
			      struct panthor_vm_pool *pool,
			      struct drm_panthor_vm_create *args)
{
	u64 kernel_va_start, kernel_va_range;
	struct panthor_vm *vm;
	int ret;
	u32 id;

	ret = panthor_vm_create_check_args(ptdev, args, &kernel_va_start, &kernel_va_range);
	if (ret)
		return ret;

	vm = panthor_vm_create(ptdev, false, kernel_va_start, kernel_va_range,
			       kernel_va_start, kernel_va_range);
	if (IS_ERR(vm))
		return PTR_ERR(vm);

	ret = xa_alloc(&pool->xa, &id, vm,
		       XA_LIMIT(1, PANTHOR_MAX_VMS_PER_FILE), GFP_KERNEL);

	if (ret) {
		panthor_vm_put(vm);
		return ret;
	}

	args->user_va_range = kernel_va_start;
	return id;
}

static void panthor_vm_destroy(struct panthor_vm *vm)
{
	if (!vm)
		return;

	vm->destroyed = true;

	mutex_lock(&vm->heaps.lock);
	panthor_heap_pool_destroy(vm->heaps.pool);
	vm->heaps.pool = NULL;
	mutex_unlock(&vm->heaps.lock);

	drm_WARN_ON(&vm->ptdev->base,
		    panthor_vm_unmap_range(vm, vm->base.mm_start, vm->base.mm_range));
	panthor_vm_put(vm);
}

/**
 * panthor_vm_pool_destroy_vm() - Destroy a VM.
 * @pool: VM pool.
 * @handle: VM handle.
 *
 * This function doesn't free the VM object or its resources, it just kills
 * all mappings, and makes sure nothing can be mapped after that point.
 *
 * If there were any active jobs at the time this function is called, these
 * jobs should experience page faults and be killed as a result.
 *
 * The VM resources are freed when the last reference on the VM object is
 * dropped.
 *
 * Return: %0 for success, negative errno value for failure
 */
int panthor_vm_pool_destroy_vm(struct panthor_vm_pool *pool, u32 handle)
{
	struct panthor_vm *vm;

	vm = xa_erase(&pool->xa, handle);

	panthor_vm_destroy(vm);

	return vm ? 0 : -EINVAL;
}

/**
 * panthor_vm_pool_get_vm() - Retrieve VM object bound to a VM handle
 * @pool: VM pool to check.
 * @handle: Handle of the VM to retrieve.
 *
 * Return: A valid pointer if the VM exists, NULL otherwise.
 */
struct panthor_vm *
panthor_vm_pool_get_vm(struct panthor_vm_pool *pool, u32 handle)
{
	struct panthor_vm *vm;

	xa_lock(&pool->xa);
	vm = panthor_vm_get(xa_load(&pool->xa, handle));
	xa_unlock(&pool->xa);

	return vm;
}

/**
 * panthor_vm_pool_destroy() - Destroy a VM pool.
 * @pfile: File.
 *
 * Destroy all VMs in the pool, and release the pool resources.
 *
 * Note that VMs can outlive the pool they were created from if other
 * objects hold a reference to their VMs.
 */
void panthor_vm_pool_destroy(struct panthor_file *pfile)
{
	struct panthor_vm *vm;
	unsigned long i;

	if (!pfile->vms)
		return;

	xa_for_each(&pfile->vms->xa, i, vm)
		panthor_vm_destroy(vm);

	xa_destroy(&pfile->vms->xa);
	kfree(pfile->vms);
}

/**
 * panthor_vm_pool_create() - Create a VM pool
 * @pfile: File.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_pool_create(struct panthor_file *pfile)
{
	pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL);
	if (!pfile->vms)
		return -ENOMEM;

	xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1);
	return 0;
}
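
/*
 * Lifecycle sketch: panthor_vm_pool_create() is meant to run when the DRM
 * file is opened, panthor_vm_pool_create_vm()/panthor_vm_pool_destroy_vm()
 * back the VM create/destroy requests, and panthor_vm_pool_destroy() tears
 * everything down on file release. XA_FLAGS_ALLOC1 guarantees handle 0 is
 * never handed out, so 0 can safely mean "no VM".
 */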
1636 */ 1637 int panthor_vm_pool_create(struct panthor_file *pfile) 1638 { 1639 pfile->vms = kzalloc(sizeof(*pfile->vms), GFP_KERNEL); 1640 if (!pfile->vms) 1641 return -ENOMEM; 1642 1643 xa_init_flags(&pfile->vms->xa, XA_FLAGS_ALLOC1); 1644 return 0; 1645 } 1646 1647 /* dummy TLB ops, the real TLB flush happens in panthor_vm_flush_range() */ 1648 static void mmu_tlb_flush_all(void *cookie) 1649 { 1650 } 1651 1652 static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule, void *cookie) 1653 { 1654 } 1655 1656 static const struct iommu_flush_ops mmu_tlb_ops = { 1657 .tlb_flush_all = mmu_tlb_flush_all, 1658 .tlb_flush_walk = mmu_tlb_flush_walk, 1659 }; 1660 1661 static const char *access_type_name(struct panthor_device *ptdev, 1662 u32 fault_status) 1663 { 1664 switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) { 1665 case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC: 1666 return "ATOMIC"; 1667 case AS_FAULTSTATUS_ACCESS_TYPE_READ: 1668 return "READ"; 1669 case AS_FAULTSTATUS_ACCESS_TYPE_WRITE: 1670 return "WRITE"; 1671 case AS_FAULTSTATUS_ACCESS_TYPE_EX: 1672 return "EXECUTE"; 1673 default: 1674 drm_WARN_ON(&ptdev->base, 1); 1675 return NULL; 1676 } 1677 } 1678 1679 static void panthor_mmu_irq_handler(struct panthor_device *ptdev, u32 status) 1680 { 1681 bool has_unhandled_faults = false; 1682 1683 status = panthor_mmu_fault_mask(ptdev, status); 1684 while (status) { 1685 u32 as = ffs(status | (status >> 16)) - 1; 1686 u32 mask = panthor_mmu_as_fault_mask(ptdev, as); 1687 u32 new_int_mask; 1688 u64 addr; 1689 u32 fault_status; 1690 u32 exception_type; 1691 u32 access_type; 1692 u32 source_id; 1693 1694 fault_status = gpu_read(ptdev, AS_FAULTSTATUS(as)); 1695 addr = gpu_read64(ptdev, AS_FAULTADDRESS(as)); 1696 1697 /* decode the fault status */ 1698 exception_type = fault_status & 0xFF; 1699 access_type = (fault_status >> 8) & 0x3; 1700 source_id = (fault_status >> 16); 1701 1702 mutex_lock(&ptdev->mmu->as.slots_lock); 1703 1704 ptdev->mmu->as.faulty_mask |= mask; 1705 new_int_mask = 1706 panthor_mmu_fault_mask(ptdev, ~ptdev->mmu->as.faulty_mask); 1707 1708 /* terminal fault, print info about the fault */ 1709 drm_err(&ptdev->base, 1710 "Unhandled Page fault in AS%d at VA 0x%016llX\n" 1711 "raw fault status: 0x%X\n" 1712 "decoded fault status: %s\n" 1713 "exception type 0x%X: %s\n" 1714 "access type 0x%X: %s\n" 1715 "source id 0x%X\n", 1716 as, addr, 1717 fault_status, 1718 (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"), 1719 exception_type, panthor_exception_name(ptdev, exception_type), 1720 access_type, access_type_name(ptdev, fault_status), 1721 source_id); 1722 1723 /* We don't handle VM faults at the moment, so let's just clear the 1724 * interrupt and let the writer/reader crash. 1725 * Note that COMPLETED irqs are never cleared, but this is fine 1726 * because they are always masked. 1727 */ 1728 gpu_write(ptdev, MMU_INT_CLEAR, mask); 1729 1730 /* Ignore MMU interrupts on this AS until it's been 1731 * re-enabled. 1732 */ 1733 ptdev->mmu->irq.mask = new_int_mask; 1734 1735 if (ptdev->mmu->as.slots[as].vm) 1736 ptdev->mmu->as.slots[as].vm->unhandled_fault = true; 1737 1738 /* Disable the MMU to kill jobs on this AS. 

/**
 * panthor_mmu_suspend() - Suspend the MMU logic
 * @ptdev: Device.
 *
 * All we do here is de-assign the AS slots on all active VMs, so things
 * get flushed to the main memory, and no further access to these VMs is
 * possible.
 *
 * We also suspend the MMU IRQ.
 */
void panthor_mmu_suspend(struct panthor_device *ptdev)
{
	mutex_lock(&ptdev->mmu->as.slots_lock);
	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;

		if (vm) {
			drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
			panthor_vm_release_as_locked(vm);
		}
	}
	mutex_unlock(&ptdev->mmu->as.slots_lock);

	panthor_mmu_irq_suspend(&ptdev->mmu->irq);
}

/**
 * panthor_mmu_resume() - Resume the MMU logic
 * @ptdev: Device.
 *
 * Resume the IRQ.
 *
 * We don't re-enable previously active VMs. We assume other parts of the
 * driver will call panthor_vm_active() on the VMs they intend to use.
 */
void panthor_mmu_resume(struct panthor_device *ptdev)
{
	mutex_lock(&ptdev->mmu->as.slots_lock);
	ptdev->mmu->as.alloc_mask = 0;
	ptdev->mmu->as.faulty_mask = 0;
	mutex_unlock(&ptdev->mmu->as.slots_lock);

	panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));
}

/**
 * panthor_mmu_pre_reset() - Prepare for a reset
 * @ptdev: Device.
 *
 * Suspend the IRQ, and make sure all VM_BIND queues are stopped, so we
 * don't get asked to do a VM operation while the GPU is down.
 *
 * We don't cleanly shutdown the AS slots here, because the reset might
 * come from an AS_ACTIVE_BIT stuck situation.
 */
void panthor_mmu_pre_reset(struct panthor_device *ptdev)
{
	struct panthor_vm *vm;

	panthor_mmu_irq_suspend(&ptdev->mmu->irq);

	mutex_lock(&ptdev->mmu->vm.lock);
	ptdev->mmu->vm.reset_in_progress = true;
	list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
		panthor_vm_stop(vm);
	mutex_unlock(&ptdev->mmu->vm.lock);
}

/**
 * panthor_mmu_post_reset() - Restore things after a reset
 * @ptdev: Device.
 *
 * Put the MMU logic back in action after a reset. That implies resuming the
 * IRQ and re-enabling the VM_BIND queues.
 */
void panthor_mmu_post_reset(struct panthor_device *ptdev)
{
	struct panthor_vm *vm;

	mutex_lock(&ptdev->mmu->as.slots_lock);

	/* Now that the reset is effective, we can assume that none of the
	 * AS slots are setup, and clear the faulty flags too.
	 */
	ptdev->mmu->as.alloc_mask = 0;
	ptdev->mmu->as.faulty_mask = 0;

	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;

		if (vm)
			panthor_vm_release_as_locked(vm);
	}

	mutex_unlock(&ptdev->mmu->as.slots_lock);

	panthor_mmu_irq_resume(&ptdev->mmu->irq, panthor_mmu_fault_mask(ptdev, ~0));

	/* Restart the VM_BIND queues. */
	mutex_lock(&ptdev->mmu->vm.lock);
	list_for_each_entry(vm, &ptdev->mmu->vm.list, node)
		panthor_vm_start(vm);
	ptdev->mmu->vm.reset_in_progress = false;
	mutex_unlock(&ptdev->mmu->vm.lock);
}

static void panthor_vm_free(struct drm_gpuvm *gpuvm)
{
	struct panthor_vm *vm = container_of(gpuvm, struct panthor_vm, base);
	struct panthor_device *ptdev = vm->ptdev;

	mutex_lock(&vm->heaps.lock);
	if (drm_WARN_ON(&ptdev->base, vm->heaps.pool))
		panthor_heap_pool_destroy(vm->heaps.pool);
	mutex_unlock(&vm->heaps.lock);
	mutex_destroy(&vm->heaps.lock);

	mutex_lock(&ptdev->mmu->vm.lock);
	list_del(&vm->node);
	/* Restore the scheduler state so we can call drm_sched_entity_destroy()
	 * and drm_sched_fini(). If we get there, that means we have no job left
	 * and no new jobs can be queued, so we can start the scheduler without
	 * risking interfering with the reset.
	 */
	if (ptdev->mmu->vm.reset_in_progress)
		panthor_vm_start(vm);
	mutex_unlock(&ptdev->mmu->vm.lock);

	drm_sched_entity_destroy(&vm->entity);
	drm_sched_fini(&vm->sched);

	mutex_lock(&ptdev->mmu->as.slots_lock);
	if (vm->as.id >= 0) {
		int cookie;

		if (drm_dev_enter(&ptdev->base, &cookie)) {
			panthor_mmu_as_disable(ptdev, vm->as.id);
			drm_dev_exit(cookie);
		}

		ptdev->mmu->as.slots[vm->as.id].vm = NULL;
		clear_bit(vm->as.id, &ptdev->mmu->as.alloc_mask);
		list_del(&vm->as.lru_node);
	}
	mutex_unlock(&ptdev->mmu->as.slots_lock);

	free_io_pgtable_ops(vm->pgtbl_ops);

	drm_mm_takedown(&vm->mm);
	kfree(vm);
}

/**
 * panthor_vm_put() - Release a reference on a VM
 * @vm: VM to release the reference on. Can be NULL.
 */
void panthor_vm_put(struct panthor_vm *vm)
{
	drm_gpuvm_put(vm ? &vm->base : NULL);
}

/**
 * panthor_vm_get() - Get a VM reference
 * @vm: VM to get the reference on. Can be NULL.
 *
 * Return: @vm value.
 */
struct panthor_vm *panthor_vm_get(struct panthor_vm *vm)
{
	if (vm)
		drm_gpuvm_get(&vm->base);

	return vm;
}

/**
 * panthor_vm_get_heap_pool() - Get the heap pool attached to a VM
 * @vm: VM to query the heap pool on.
 * @create: True if the heap pool should be created when it doesn't exist.
 *
 * Heap pools are per-VM. This function allows one to retrieve the heap pool
 * attached to a VM.
 *
 * If no heap pool exists yet, and @create is true, we create one.
 *
 * The returned panthor_heap_pool should be released with panthor_heap_pool_put().
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct panthor_heap_pool *panthor_vm_get_heap_pool(struct panthor_vm *vm, bool create)
{
	struct panthor_heap_pool *pool;

	mutex_lock(&vm->heaps.lock);
	if (!vm->heaps.pool && create) {
		if (vm->destroyed)
			pool = ERR_PTR(-EINVAL);
		else
			pool = panthor_heap_pool_create(vm->ptdev, vm);

		if (!IS_ERR(pool))
			vm->heaps.pool = panthor_heap_pool_get(pool);
	} else {
		pool = panthor_heap_pool_get(vm->heaps.pool);
		if (!pool)
			pool = ERR_PTR(-ENOENT);
	}
	mutex_unlock(&vm->heaps.lock);

	return pool;
}
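
/*
 * A minimal usage sketch (hypothetical caller): the reference returned by
 * this helper must be balanced with panthor_heap_pool_put():
 *
 *	pool = panthor_vm_get_heap_pool(vm, true);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 *	...
 *	panthor_heap_pool_put(pool);
 */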
/**
 * panthor_vm_heaps_sizes() - Calculate the size of all heap chunks across
 * all heap pools attached to a file's VMs
 * @pfile: File.
 * @stats: Memory stats to be updated.
 *
 * Calculate the size of all heap chunks in all heap pools bound to the
 * file's VMs. If a VM is active, its heap size is accounted as active as
 * well.
 */
void panthor_vm_heaps_sizes(struct panthor_file *pfile, struct drm_memory_stats *stats)
{
	struct panthor_vm *vm;
	unsigned long i;

	if (!pfile->vms)
		return;

	xa_lock(&pfile->vms->xa);
	xa_for_each(&pfile->vms->xa, i, vm) {
		size_t size = panthor_heap_pool_size(vm->heaps.pool);

		stats->resident += size;
		if (vm->as.id >= 0)
			stats->active += size;
	}
	xa_unlock(&pfile->vms->xa);
}

static u64 mair_to_memattr(u64 mair, bool coherent)
{
	u64 memattr = 0;
	u32 i;

	for (i = 0; i < 8; i++) {
		u8 in_attr = mair >> (8 * i), out_attr;
		u8 outer = in_attr >> 4, inner = in_attr & 0xf;

		/* For caching to be enabled, the inner and outer caching
		 * policies both have to be write-back. If one of them is
		 * write-through or non-cacheable, we just choose
		 * non-cacheable. Device memory is also translated to
		 * non-cacheable.
		 */
		if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
				   AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
		} else {
			out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
				   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
			/* Use SH_MIDGARD_INNER mode when the device isn't
			 * coherent, so SH_IS, which is used when IOMMU_CACHE is
			 * set, maps to Mali's internal-shareable mode. As per
			 * the Mali spec, inner and outer-shareable modes aren't
			 * allowed for WB memory when coherency is disabled.
			 * Use SH_CPU_INNER mode when coherency is enabled, so
			 * that SH_IS actually maps to the standard definition of
			 * inner-shareable.
			 */
			if (!coherent)
				out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
			else
				out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
		}

		memattr |= (u64)out_attr << (8 * i);
	}

	return memattr;
}
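/*
 * Worked example (illustrative): the common MAIR byte for Normal write-back
 * memory is 0xff (outer = 0xf, inner = 0xf). Both nibbles pass the
 * cacheability checks above, so the byte is translated to inner/outer
 * write-back, with the allocation hints taken from the low bits of the
 * inner nibble, and the shareability mode picked based on @coherent. A
 * Device-type byte such as 0x04 has outer == 0, so it takes the first
 * branch and becomes non-cacheable.
 */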
static void panthor_vma_link(struct panthor_vm *vm,
			     struct panthor_vma *vma,
			     struct drm_gpuvm_bo *vm_bo)
{
	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);

	mutex_lock(&bo->gpuva_list_lock);
	drm_gpuva_link(&vma->base, vm_bo);
	drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo));
	mutex_unlock(&bo->gpuva_list_lock);
}

static void panthor_vma_unlink(struct panthor_vm *vm,
			       struct panthor_vma *vma)
{
	struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj);
	struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo);

	mutex_lock(&bo->gpuva_list_lock);
	drm_gpuva_unlink(&vma->base);
	mutex_unlock(&bo->gpuva_list_lock);

	/* drm_gpuva_unlink() releases the vm_bo, but we manually retained it
	 * when entering this function, so we can implement deferred VMA
	 * destruction. Re-assign it here.
	 */
	vma->base.vm_bo = vm_bo;
	list_add_tail(&vma->node, &vm->op_ctx->returned_vmas);
}

static void panthor_vma_init(struct panthor_vma *vma, u32 flags)
{
	INIT_LIST_HEAD(&vma->node);
	vma->flags = flags;
}

#define PANTHOR_VM_MAP_FLAGS \
	(DRM_PANTHOR_VM_BIND_OP_MAP_READONLY | \
	 DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC | \
	 DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED)

static int panthor_gpuva_sm_step_map(struct drm_gpuva_op *op, void *priv)
{
	struct panthor_vm *vm = priv;
	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
	struct panthor_vma *vma = panthor_vm_op_ctx_get_vma(op_ctx);
	int ret;

	if (!vma)
		return -EINVAL;

	panthor_vma_init(vma, op_ctx->flags & PANTHOR_VM_MAP_FLAGS);

	ret = panthor_vm_map_pages(vm, op->map.va.addr, flags_to_prot(vma->flags),
				   op_ctx->map.sgt, op->map.gem.offset,
				   op->map.va.range);
	if (ret)
		return ret;

	/* The ref is owned by the mapping now. Clear the obj field so we
	 * don't release the pinning/obj ref behind GPUVA's back.
	 */
	drm_gpuva_map(&vm->base, &vma->base, &op->map);
	panthor_vma_link(vm, vma, op_ctx->map.vm_bo);
	op_ctx->map.vm_bo = NULL;
	return 0;
}

static int panthor_gpuva_sm_step_remap(struct drm_gpuva_op *op,
				       void *priv)
{
	struct panthor_vma *unmap_vma = container_of(op->remap.unmap->va, struct panthor_vma, base);
	struct panthor_vm *vm = priv;
	struct panthor_vm_op_ctx *op_ctx = vm->op_ctx;
	struct panthor_vma *prev_vma = NULL, *next_vma = NULL;
	u64 unmap_start, unmap_range;
	int ret;

	drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range);
	ret = panthor_vm_unmap_pages(vm, unmap_start, unmap_range);
	if (ret)
		return ret;

	if (op->remap.prev) {
		prev_vma = panthor_vm_op_ctx_get_vma(op_ctx);
		panthor_vma_init(prev_vma, unmap_vma->flags);
	}

	if (op->remap.next) {
		next_vma = panthor_vm_op_ctx_get_vma(op_ctx);
		panthor_vma_init(next_vma, unmap_vma->flags);
	}

	drm_gpuva_remap(prev_vma ? &prev_vma->base : NULL,
			next_vma ? &next_vma->base : NULL,
			&op->remap);

	if (prev_vma) {
		/* panthor_vma_link() transfers the vm_bo ownership to
		 * the VMA object. Since the vm_bo we're passing is still
		 * owned by the old mapping, which will be released when this
		 * mapping is destroyed, we need to grab a ref here.
		 */
		panthor_vma_link(vm, prev_vma,
				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
	}

	if (next_vma) {
		panthor_vma_link(vm, next_vma,
				 drm_gpuvm_bo_get(op->remap.unmap->va->vm_bo));
	}

	panthor_vma_unlink(vm, unmap_vma);
	return 0;
}
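/*
 * Illustration (not driver code): a partial unmap in the middle of an
 * existing VMA is turned into a remap by drm_gpuvm. With an existing
 * mapping covering [0x10000, 0x30000) and an unmap request for
 * [0x18000, 0x20000), panthor_gpuva_sm_step_remap() gets:
 *
 *	op->remap.prev  -> new mapping for [0x10000, 0x18000)
 *	op->remap.next  -> new mapping for [0x20000, 0x30000)
 *	op->remap.unmap -> the original [0x10000, 0x30000) mapping
 *
 * Only the [0x18000, 0x20000) range is actually removed from the page
 * table, which is what drm_gpuva_op_remap_to_unmap_range() computes.
 */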
static int panthor_gpuva_sm_step_unmap(struct drm_gpuva_op *op,
				       void *priv)
{
	struct panthor_vma *unmap_vma = container_of(op->unmap.va, struct panthor_vma, base);
	struct panthor_vm *vm = priv;
	int ret;

	ret = panthor_vm_unmap_pages(vm, unmap_vma->base.va.addr,
				     unmap_vma->base.va.range);
	if (drm_WARN_ON(&vm->ptdev->base, ret))
		return ret;

	drm_gpuva_unmap(&op->unmap);
	panthor_vma_unlink(vm, unmap_vma);
	return 0;
}

static const struct drm_gpuvm_ops panthor_gpuvm_ops = {
	.vm_free = panthor_vm_free,
	.sm_step_map = panthor_gpuva_sm_step_map,
	.sm_step_remap = panthor_gpuva_sm_step_remap,
	.sm_step_unmap = panthor_gpuva_sm_step_unmap,
};

/**
 * panthor_vm_resv() - Get the dma_resv object attached to a VM.
 * @vm: VM to get the dma_resv of.
 *
 * Return: A dma_resv object.
 */
struct dma_resv *panthor_vm_resv(struct panthor_vm *vm)
{
	return drm_gpuvm_resv(&vm->base);
}

struct drm_gem_object *panthor_vm_root_gem(struct panthor_vm *vm)
{
	if (!vm)
		return NULL;

	return vm->base.r_obj;
}

static int
panthor_vm_exec_op(struct panthor_vm *vm, struct panthor_vm_op_ctx *op,
		   bool flag_vm_unusable_on_failure)
{
	u32 op_type = op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK;
	int ret;

	if (op_type == DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY)
		return 0;

	mutex_lock(&vm->op_lock);
	vm->op_ctx = op;
	switch (op_type) {
	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP: {
		const struct drm_gpuvm_map_req map_req = {
			.map.va.addr = op->va.addr,
			.map.va.range = op->va.range,
			.map.gem.obj = op->map.vm_bo->obj,
			.map.gem.offset = op->map.bo_offset,
		};

		if (vm->unusable) {
			ret = -EINVAL;
			break;
		}

		ret = drm_gpuvm_sm_map(&vm->base, vm, &map_req);
		break;
	}

	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
		ret = drm_gpuvm_sm_unmap(&vm->base, vm, op->va.addr, op->va.range);
		break;

	default:
		ret = -EINVAL;
		break;
	}

	if (ret && flag_vm_unusable_on_failure)
		vm->unusable = true;

	vm->op_ctx = NULL;
	mutex_unlock(&vm->op_lock);

	return ret;
}
static struct dma_fence *
panthor_vm_bind_run_job(struct drm_sched_job *sched_job)
{
	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
	bool cookie;
	int ret;

	/* Not only do we report an error, which gets propagated to the
	 * drm_sched finished fence, but we also flag the VM as unusable,
	 * because a failure in an async VM_BIND results in an inconsistent
	 * state: the VM needs to be destroyed and re-created.
	 */
	cookie = dma_fence_begin_signalling();
	ret = panthor_vm_exec_op(job->vm, &job->ctx, true);
	dma_fence_end_signalling(cookie);

	return ret ? ERR_PTR(ret) : NULL;
}

static void panthor_vm_bind_job_release(struct kref *kref)
{
	struct panthor_vm_bind_job *job = container_of(kref, struct panthor_vm_bind_job, refcount);

	if (job->base.s_fence)
		drm_sched_job_cleanup(&job->base);

	panthor_vm_cleanup_op_ctx(&job->ctx, job->vm);
	panthor_vm_put(job->vm);
	kfree(job);
}

/**
 * panthor_vm_bind_job_put() - Release a VM_BIND job reference
 * @sched_job: Job to release the reference on.
 */
void panthor_vm_bind_job_put(struct drm_sched_job *sched_job)
{
	struct panthor_vm_bind_job *job =
		container_of(sched_job, struct panthor_vm_bind_job, base);

	if (sched_job)
		kref_put(&job->refcount, panthor_vm_bind_job_release);
}

static void
panthor_vm_bind_free_job(struct drm_sched_job *sched_job)
{
	struct panthor_vm_bind_job *job =
		container_of(sched_job, struct panthor_vm_bind_job, base);

	drm_sched_job_cleanup(sched_job);

	/* Do the heavy cleanups asynchronously, so we're out of the
	 * dma-signaling path and can acquire dma-resv locks safely.
	 */
	queue_work(panthor_cleanup_wq, &job->cleanup_op_ctx_work);
}

static enum drm_gpu_sched_stat
panthor_vm_bind_timedout_job(struct drm_sched_job *sched_job)
{
	WARN(1, "VM_BIND ops are synchronous for now, there should be no timeout!");
	return DRM_GPU_SCHED_STAT_RESET;
}

static const struct drm_sched_backend_ops panthor_vm_bind_ops = {
	.run_job = panthor_vm_bind_run_job,
	.free_job = panthor_vm_bind_free_job,
	.timedout_job = panthor_vm_bind_timedout_job,
};
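/*
 * Sketch (illustrative): dma_fence_begin/end_signalling() annotate a
 * fence-signalling critical section so lockdep can flag operations that
 * could block fence completion (taking dma-resv locks, GFP_KERNEL
 * allocations, ...). The pattern used by run_job above is the generic one:
 *
 *	bool cookie = dma_fence_begin_signalling();
 *
 *	// ... work that must not wait on fence signalling ...
 *
 *	dma_fence_end_signalling(cookie);
 */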
/**
 * panthor_vm_create() - Create a VM
 * @ptdev: Device.
 * @for_mcu: True if this is the FW MCU VM.
 * @kernel_va_start: Start of the range reserved for kernel BO mapping.
 * @kernel_va_size: Size of the range reserved for kernel BO mapping.
 * @auto_kernel_va_start: Start of the auto-VA kernel range.
 * @auto_kernel_va_size: Size of the auto-VA kernel range.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct panthor_vm *
panthor_vm_create(struct panthor_device *ptdev, bool for_mcu,
		  u64 kernel_va_start, u64 kernel_va_size,
		  u64 auto_kernel_va_start, u64 auto_kernel_va_size)
{
	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
	u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(ptdev->gpu_info.mmu_features);
	u64 full_va_range = 1ull << va_bits;
	struct drm_gem_object *dummy_gem;
	struct drm_gpu_scheduler *sched;
	const struct drm_sched_init_args sched_args = {
		.ops = &panthor_vm_bind_ops,
		.submit_wq = ptdev->mmu->vm.wq,
		.num_rqs = 1,
		.credit_limit = 1,
		/* Bind operations are synchronous for now, no timeout needed. */
		.timeout = MAX_SCHEDULE_TIMEOUT,
		.name = "panthor-vm-bind",
		.dev = ptdev->base.dev,
	};
	struct io_pgtable_cfg pgtbl_cfg;
	u64 mair, min_va, va_range;
	struct panthor_vm *vm;
	int ret;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return ERR_PTR(-ENOMEM);

	/* We allocate a dummy GEM object to serve as the VM resv object. */
	dummy_gem = drm_gpuvm_resv_object_alloc(&ptdev->base);
	if (!dummy_gem) {
		ret = -ENOMEM;
		goto err_free_vm;
	}

	mutex_init(&vm->heaps.lock);
	vm->for_mcu = for_mcu;
	vm->ptdev = ptdev;
	mutex_init(&vm->op_lock);

	if (for_mcu) {
		/* The CSF MCU is a Cortex-M7, and can only address 4G */
		min_va = 0;
		va_range = SZ_4G;
	} else {
		min_va = 0;
		va_range = full_va_range;
	}

	mutex_init(&vm->mm_lock);
	drm_mm_init(&vm->mm, kernel_va_start, kernel_va_size);
	vm->kernel_auto_va.start = auto_kernel_va_start;
	vm->kernel_auto_va.end = vm->kernel_auto_va.start + auto_kernel_va_size - 1;

	INIT_LIST_HEAD(&vm->node);
	INIT_LIST_HEAD(&vm->as.lru_node);
	vm->as.id = -1;
	refcount_set(&vm->as.active_cnt, 0);

	pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = SZ_4K | SZ_2M,
		.ias = va_bits,
		.oas = pa_bits,
		.coherent_walk = ptdev->coherent,
		.tlb = &mmu_tlb_ops,
		.iommu_dev = ptdev->base.dev,
		.alloc = alloc_pt,
		.free = free_pt,
	};

	vm->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &pgtbl_cfg, vm);
	if (!vm->pgtbl_ops) {
		ret = -EINVAL;
		goto err_mm_takedown;
	}

	ret = drm_sched_init(&vm->sched, &sched_args);
	if (ret)
		goto err_free_io_pgtable;

	sched = &vm->sched;
	ret = drm_sched_entity_init(&vm->entity, 0, &sched, 1, NULL);
	if (ret)
		goto err_sched_fini;

	mair = io_pgtable_ops_to_pgtable(vm->pgtbl_ops)->cfg.arm_lpae_s1_cfg.mair;
	vm->memattr = mair_to_memattr(mair, ptdev->coherent);

	mutex_lock(&ptdev->mmu->vm.lock);
	list_add_tail(&vm->node, &ptdev->mmu->vm.list);

	/* If a reset is in progress, stop the scheduler. */
	if (ptdev->mmu->vm.reset_in_progress)
		panthor_vm_stop(vm);
	mutex_unlock(&ptdev->mmu->vm.lock);

	/* We intentionally leave the reserved range at zero, because we want
	 * kernel VMAs to be handled the same way user VMAs are.
	 */
	drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM",
		       DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem,
		       min_va, va_range, 0, 0, &panthor_gpuvm_ops);
	drm_gem_object_put(dummy_gem);
	return vm;

err_sched_fini:
	drm_sched_fini(&vm->sched);

err_free_io_pgtable:
	free_io_pgtable_ops(vm->pgtbl_ops);

err_mm_takedown:
	drm_mm_takedown(&vm->mm);
	drm_gem_object_put(dummy_gem);

err_free_vm:
	kfree(vm);
	return ERR_PTR(ret);
}
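/*
 * Example (illustrative sketch, all values made up): a user VM created on
 * behalf of the VM_CREATE ioctl reserves part of the VA space for kernel
 * objects, along these lines:
 *
 *	// 1G of kernel VA at the top of a 48-bit VA space, with the second
 *	// half of that range used for automatic VA assignment.
 *	vm = panthor_vm_create(ptdev, false,
 *			       (1ull << 48) - SZ_1G, SZ_1G,
 *			       (1ull << 48) - SZ_512M, SZ_512M);
 *	if (IS_ERR(vm))
 *		return PTR_ERR(vm);
 */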
static int
panthor_vm_bind_prepare_op_ctx(struct drm_file *file,
			       struct panthor_vm *vm,
			       const struct drm_panthor_vm_bind_op *op,
			       struct panthor_vm_op_ctx *op_ctx)
{
	ssize_t vm_pgsz = panthor_vm_page_size(vm);
	struct drm_gem_object *gem;
	int ret;

	/* Both the VA and size must be aligned on the page size. */
	if (!IS_ALIGNED(op->va | op->size, vm_pgsz))
		return -EINVAL;

	switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) {
	case DRM_PANTHOR_VM_BIND_OP_TYPE_MAP:
		gem = drm_gem_object_lookup(file, op->bo_handle);
		ret = panthor_vm_prepare_map_op_ctx(op_ctx, vm,
						    gem ? to_panthor_bo(gem) : NULL,
						    op->bo_offset,
						    op->size,
						    op->va,
						    op->flags);
		drm_gem_object_put(gem);
		return ret;

	case DRM_PANTHOR_VM_BIND_OP_TYPE_UNMAP:
		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
			return -EINVAL;

		if (op->bo_handle || op->bo_offset)
			return -EINVAL;

		return panthor_vm_prepare_unmap_op_ctx(op_ctx, vm, op->va, op->size);

	case DRM_PANTHOR_VM_BIND_OP_TYPE_SYNC_ONLY:
		if (op->flags & ~DRM_PANTHOR_VM_BIND_OP_TYPE_MASK)
			return -EINVAL;

		if (op->bo_handle || op->bo_offset)
			return -EINVAL;

		if (op->va || op->size)
			return -EINVAL;

		if (!op->syncs.count)
			return -EINVAL;

		panthor_vm_prepare_sync_only_op_ctx(op_ctx, vm);
		return 0;

	default:
		return -EINVAL;
	}
}

static void panthor_vm_bind_job_cleanup_op_ctx_work(struct work_struct *work)
{
	struct panthor_vm_bind_job *job =
		container_of(work, struct panthor_vm_bind_job, cleanup_op_ctx_work);

	panthor_vm_bind_job_put(&job->base);
}

/**
 * panthor_vm_bind_job_create() - Create a VM_BIND job
 * @file: File.
 * @vm: VM targeted by the VM_BIND job.
 * @op: VM operation data.
 *
 * Return: A valid pointer on success, an ERR_PTR() otherwise.
 */
struct drm_sched_job *
panthor_vm_bind_job_create(struct drm_file *file,
			   struct panthor_vm *vm,
			   const struct drm_panthor_vm_bind_op *op)
{
	struct panthor_vm_bind_job *job;
	int ret;

	if (!vm)
		return ERR_PTR(-EINVAL);

	if (vm->destroyed || vm->unusable)
		return ERR_PTR(-EINVAL);

	job = kzalloc(sizeof(*job), GFP_KERNEL);
	if (!job)
		return ERR_PTR(-ENOMEM);

	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &job->ctx);
	if (ret) {
		kfree(job);
		return ERR_PTR(ret);
	}

	INIT_WORK(&job->cleanup_op_ctx_work, panthor_vm_bind_job_cleanup_op_ctx_work);
	kref_init(&job->refcount);
	job->vm = panthor_vm_get(vm);

	ret = drm_sched_job_init(&job->base, &vm->entity, 1, vm, file->client_id);
	if (ret)
		goto err_put_job;

	return &job->base;

err_put_job:
	panthor_vm_bind_job_put(&job->base);
	return ERR_PTR(ret);
}
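/*
 * Sketch (illustrative, simplified from the actual submit path): an
 * asynchronous VM_BIND submission roughly chains the helpers in this file.
 * Sync-object wiring and error handling are omitted, and the dma-resv
 * locking is assumed to be done with a drm_exec loop (see the example after
 * panthor_vm_prepare_mapped_bos_resvs()).
 *
 *	struct drm_sched_job *job;
 *
 *	job = panthor_vm_bind_job_create(file, vm, op);
 *	// ... lock resvs with drm_exec, then:
 *	//	panthor_vm_bind_job_prepare_resvs(&exec, job);
 *	drm_sched_job_arm(job);
 *	panthor_vm_bind_job_update_resvs(&exec, job);
 *	drm_sched_entity_push_job(job);
 *	panthor_vm_bind_job_put(job);
 */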
/**
 * panthor_vm_bind_job_prepare_resvs() - Prepare VM_BIND job dma_resvs
 * @exec: The locking/preparation context.
 * @sched_job: The job to prepare resvs on.
 *
 * Locks and prepares the VM resv.
 *
 * If this is a map operation, locks and prepares the GEM resv.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_bind_job_prepare_resvs(struct drm_exec *exec,
				      struct drm_sched_job *sched_job)
{
	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);
	int ret;

	/* Acquire the VM lock and reserve a slot for this VM bind job. */
	ret = drm_gpuvm_prepare_vm(&job->vm->base, exec, 1);
	if (ret)
		return ret;

	if (job->ctx.map.vm_bo) {
		/* Lock/prepare the GEM being mapped. */
		ret = drm_exec_prepare_obj(exec, job->ctx.map.vm_bo->obj, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * panthor_vm_bind_job_update_resvs() - Update the resv objects touched by a job
 * @exec: drm_exec context.
 * @sched_job: Job to update the resvs on.
 */
void panthor_vm_bind_job_update_resvs(struct drm_exec *exec,
				      struct drm_sched_job *sched_job)
{
	struct panthor_vm_bind_job *job = container_of(sched_job, struct panthor_vm_bind_job, base);

	/* Explicit sync => we just register our job finished fence as bookkeep. */
	drm_gpuvm_resv_add_fence(&job->vm->base, exec,
				 &sched_job->s_fence->finished,
				 DMA_RESV_USAGE_BOOKKEEP,
				 DMA_RESV_USAGE_BOOKKEEP);
}

void panthor_vm_update_resvs(struct panthor_vm *vm, struct drm_exec *exec,
			     struct dma_fence *fence,
			     enum dma_resv_usage private_usage,
			     enum dma_resv_usage extobj_usage)
{
	drm_gpuvm_resv_add_fence(&vm->base, exec, fence, private_usage, extobj_usage);
}

/**
 * panthor_vm_bind_exec_sync_op() - Execute a VM_BIND operation synchronously.
 * @file: File.
 * @vm: VM targeted by the VM operation.
 * @op: Data describing the VM operation.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_bind_exec_sync_op(struct drm_file *file,
				 struct panthor_vm *vm,
				 struct drm_panthor_vm_bind_op *op)
{
	struct panthor_vm_op_ctx op_ctx;
	int ret;

	/* No sync objects allowed on synchronous operations. */
	if (op->syncs.count)
		return -EINVAL;

	if (!op->size)
		return 0;

	ret = panthor_vm_bind_prepare_op_ctx(file, vm, op, &op_ctx);
	if (ret)
		return ret;

	ret = panthor_vm_exec_op(vm, &op_ctx, false);
	panthor_vm_cleanup_op_ctx(&op_ctx, vm);

	return ret;
}

/**
 * panthor_vm_map_bo_range() - Map a GEM object range to a VM
 * @vm: VM to map the GEM to.
 * @bo: GEM object to map.
 * @offset: Offset in the GEM object.
 * @size: Size to map.
 * @va: Virtual address to map the object to.
 * @flags: Combination of drm_panthor_vm_bind_op_flags flags.
 * Only map-related flags are valid.
 *
 * Internal use only. For userspace requests, use
 * panthor_vm_bind_exec_sync_op() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_map_bo_range(struct panthor_vm *vm, struct panthor_gem_object *bo,
			    u64 offset, u64 size, u64 va, u32 flags)
{
	struct panthor_vm_op_ctx op_ctx;
	int ret;

	ret = panthor_vm_prepare_map_op_ctx(&op_ctx, vm, bo, offset, size, va, flags);
	if (ret)
		return ret;

	ret = panthor_vm_exec_op(vm, &op_ctx, false);
	panthor_vm_cleanup_op_ctx(&op_ctx, vm);

	return ret;
}

/**
 * panthor_vm_unmap_range() - Unmap a portion of the VA space
 * @vm: VM to unmap the region from.
 * @va: Virtual address to unmap. Must be 4k aligned.
 * @size: Size of the region to unmap. Must be 4k aligned.
 *
 * Internal use only. For userspace requests, use
 * panthor_vm_bind_exec_sync_op() instead.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_unmap_range(struct panthor_vm *vm, u64 va, u64 size)
{
	struct panthor_vm_op_ctx op_ctx;
	int ret;

	ret = panthor_vm_prepare_unmap_op_ctx(&op_ctx, vm, va, size);
	if (ret)
		return ret;

	ret = panthor_vm_exec_op(vm, &op_ctx, false);
	panthor_vm_cleanup_op_ctx(&op_ctx, vm);

	return ret;
}
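/*
 * Example (illustrative): kernel-internal mappings pair the two helpers
 * above. This is roughly how a kernel BO would be mapped at a fixed VA and
 * torn down again; the VA value is made up.
 *
 *	u64 va = 0x800000;
 *	int ret;
 *
 *	ret = panthor_vm_map_bo_range(vm, bo, 0, bo->base.base.size, va,
 *				      DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC);
 *	if (ret)
 *		return ret;
 *
 *	// ... use the mapping ...
 *
 *	ret = panthor_vm_unmap_range(vm, va, bo->base.base.size);
 */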
/**
 * panthor_vm_prepare_mapped_bos_resvs() - Prepare resvs on VM BOs.
 * @exec: Locking/preparation context.
 * @vm: VM targeted by the GPU job.
 * @slot_count: Number of slots to reserve.
 *
 * GPU jobs assume all BOs bound to the VM at the time the job is submitted
 * are available when the job is executed. In order to guarantee that, we
 * need to reserve a slot on all BOs mapped to a VM and update this slot with
 * the job fence after its submission.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_vm_prepare_mapped_bos_resvs(struct drm_exec *exec, struct panthor_vm *vm,
					u32 slot_count)
{
	int ret;

	/* Acquire the VM lock and reserve a slot for this GPU job. */
	ret = drm_gpuvm_prepare_vm(&vm->base, exec, slot_count);
	if (ret)
		return ret;

	return drm_gpuvm_prepare_objects(&vm->base, exec, slot_count);
}
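/*
 * Example (illustrative): GPU job submission is expected to call the helper
 * above from a drm_exec retry loop, so contended dma-resv locks are backed
 * off and re-acquired transparently:
 *
 *	struct drm_exec exec;
 *	int ret;
 *
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		ret = panthor_vm_prepare_mapped_bos_resvs(&exec, vm, 1);
 *		drm_exec_retry_on_contention(&exec);
 *		if (ret)
 *			break;
 *	}
 *
 *	// ... arm the job, add fences to the reserved slots ...
 *
 *	drm_exec_fini(&exec);
 */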
/**
 * panthor_mmu_unplug() - Unplug the MMU logic
 * @ptdev: Device.
 *
 * No access to the MMU regs should be done after this function is called.
 * We suspend the IRQ and disable all VMs to guarantee that.
 */
void panthor_mmu_unplug(struct panthor_device *ptdev)
{
	if (!IS_ENABLED(CONFIG_PM) || pm_runtime_active(ptdev->base.dev))
		panthor_mmu_irq_suspend(&ptdev->mmu->irq);

	mutex_lock(&ptdev->mmu->as.slots_lock);
	for (u32 i = 0; i < ARRAY_SIZE(ptdev->mmu->as.slots); i++) {
		struct panthor_vm *vm = ptdev->mmu->as.slots[i].vm;

		if (vm) {
			drm_WARN_ON(&ptdev->base, panthor_mmu_as_disable(ptdev, i));
			panthor_vm_release_as_locked(vm);
		}
	}
	mutex_unlock(&ptdev->mmu->as.slots_lock);
}

static void panthor_mmu_release_wq(struct drm_device *ddev, void *res)
{
	destroy_workqueue(res);
}

/**
 * panthor_mmu_init() - Initialize the MMU logic.
 * @ptdev: Device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_mmu_init(struct panthor_device *ptdev)
{
	u32 va_bits = GPU_MMU_FEATURES_VA_BITS(ptdev->gpu_info.mmu_features);
	struct panthor_mmu *mmu;
	int ret, irq;

	mmu = drmm_kzalloc(&ptdev->base, sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return -ENOMEM;

	INIT_LIST_HEAD(&mmu->as.lru_list);

	ret = drmm_mutex_init(&ptdev->base, &mmu->as.slots_lock);
	if (ret)
		return ret;

	INIT_LIST_HEAD(&mmu->vm.list);
	ret = drmm_mutex_init(&ptdev->base, &mmu->vm.lock);
	if (ret)
		return ret;

	ptdev->mmu = mmu;

	irq = platform_get_irq_byname(to_platform_device(ptdev->base.dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	ret = panthor_request_mmu_irq(ptdev, &mmu->irq, irq,
				      panthor_mmu_fault_mask(ptdev, ~0));
	if (ret)
		return ret;

	mmu->vm.wq = alloc_workqueue("panthor-vm-bind", WQ_UNBOUND, 0);
	if (!mmu->vm.wq)
		return -ENOMEM;

	/* On 32-bit kernels, the VA space is limited by the io_pgtable_ops
	 * abstraction, which passes the iova as an unsigned long. Patch the
	 * mmu_features to reflect this limitation.
	 */
	if (va_bits > BITS_PER_LONG) {
		ptdev->gpu_info.mmu_features &= ~GENMASK(7, 0);
		ptdev->gpu_info.mmu_features |= BITS_PER_LONG;
	}

	return drmm_add_action_or_reset(&ptdev->base, panthor_mmu_release_wq, mmu->vm.wq);
}

#ifdef CONFIG_DEBUG_FS
static int show_vm_gpuvas(struct panthor_vm *vm, struct seq_file *m)
{
	int ret;

	mutex_lock(&vm->op_lock);
	ret = drm_debugfs_gpuva_info(m, &vm->base);
	mutex_unlock(&vm->op_lock);

	return ret;
}

static int show_each_vm(struct seq_file *m, void *arg)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *ddev = node->minor->dev;
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);
	int (*show)(struct panthor_vm *, struct seq_file *) = node->info_ent->data;
	struct panthor_vm *vm;
	int ret = 0;

	mutex_lock(&ptdev->mmu->vm.lock);
	list_for_each_entry(vm, &ptdev->mmu->vm.list, node) {
		ret = show(vm, m);
		if (ret < 0)
			break;

		seq_puts(m, "\n");
	}
	mutex_unlock(&ptdev->mmu->vm.lock);

	return ret;
}

static struct drm_info_list panthor_mmu_debugfs_list[] = {
	DRM_DEBUGFS_GPUVA_INFO(show_each_vm, show_vm_gpuvas),
};

/**
 * panthor_mmu_debugfs_init() - Initialize MMU debugfs entries
 * @minor: Minor.
 */
void panthor_mmu_debugfs_init(struct drm_minor *minor)
{
	drm_debugfs_create_files(panthor_mmu_debugfs_list,
				 ARRAY_SIZE(panthor_mmu_debugfs_list),
				 minor->debugfs_root, minor);
}
#endif /* CONFIG_DEBUG_FS */

/**
 * panthor_mmu_pt_cache_init() - Initialize the page table cache.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int panthor_mmu_pt_cache_init(void)
{
	pt_cache = kmem_cache_create("panthor-mmu-pt", SZ_4K, SZ_4K, 0, NULL);
	if (!pt_cache)
		return -ENOMEM;

	return 0;
}

/**
 * panthor_mmu_pt_cache_fini() - Destroy the page table cache.
 */
void panthor_mmu_pt_cache_fini(void)
{
	kmem_cache_destroy(pt_cache);
}
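/*
 * Example (illustrative sketch): the page table cache is global, so it is
 * expected to be created once from the driver module init path and destroyed
 * on module exit. The init function name below is an assumption.
 *
 *	static int __init panthor_init(void)
 *	{
 *		int ret = panthor_mmu_pt_cache_init();
 *
 *		if (ret)
 *			return ret;
 *
 *		// ... register the platform driver, undoing the cache
 *		//     initialization with panthor_mmu_pt_cache_fini() on
 *		//     failure ...
 *		return 0;
 *	}
 */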