// SPDX-License-Identifier: MIT
/*
 * Copyright © 2021 Intel Corporation
 */

#include "xe_vm.h"

#include <linux/dma-fence-array.h>
#include <linux/nospec.h>

#include <drm/drm_exec.h>
#include <drm/drm_print.h>
#include <drm/ttm/ttm_execbuf_util.h>
#include <drm/ttm/ttm_tt.h>
#include <drm/xe_drm.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/mm.h>
#include <linux/swap.h>

#include "xe_assert.h"
#include "xe_bo.h"
#include "xe_device.h"
#include "xe_drm_client.h"
#include "xe_exec_queue.h"
#include "xe_gt.h"
#include "xe_gt_pagefault.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_pat.h"
#include "xe_pm.h"
#include "xe_preempt_fence.h"
#include "xe_pt.h"
#include "xe_res_cursor.h"
#include "xe_sync.h"
#include "xe_trace.h"
#include "generated/xe_wa_oob.h"
#include "xe_wa.h"

static struct drm_gem_object *xe_vm_obj(struct xe_vm *vm)
{
	return vm->gpuvm.r_obj;
}

/**
 * xe_vma_userptr_check_repin() - Advisory check for repin needed
 * @uvma: The userptr vma
 *
 * Check if the userptr vma has been invalidated since last successful
 * repin. The check is advisory only and the function can be called
 * without the vm->userptr.notifier_lock held. There is no guarantee that the
 * vma userptr will remain valid after a lockless check, so typically
 * the call needs to be followed by a proper check under the notifier_lock.
 *
 * Return: 0 if userptr vma is valid, -EAGAIN otherwise; repin recommended.
 */
int xe_vma_userptr_check_repin(struct xe_userptr_vma *uvma)
{
	return mmu_interval_check_retry(&uvma->userptr.notifier,
					uvma->userptr.notifier_seq) ?
		-EAGAIN : 0;
}
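
/*
 * The advisory check above is typically paired with a locked re-check; a
 * minimal sketch of that pattern (illustrative only, not an actual caller
 * in this file) looks roughly like:
 *
 *	if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
 *		err = xe_vma_userptr_pin_pages(uvma);
 *	...
 *	down_read(&vm->userptr.notifier_lock);
 *	err = __xe_vm_userptr_needs_repin(vm);
 *	up_read(&vm->userptr.notifier_lock);
 *
 * which mirrors what the rebind worker below does at the VM level.
 */
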
int xe_vma_userptr_pin_pages(struct xe_userptr_vma *uvma)
{
	struct xe_userptr *userptr = &uvma->userptr;
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	const unsigned long num_pages = xe_vma_size(vma) >> PAGE_SHIFT;
	struct page **pages;
	bool in_kthread = !current->mm;
	unsigned long notifier_seq;
	int pinned, ret, i;
	bool read_only = xe_vma_read_only(vma);

	lockdep_assert_held(&vm->lock);
	xe_assert(xe, xe_vma_is_userptr(vma));
retry:
	if (vma->gpuva.flags & XE_VMA_DESTROYED)
		return 0;

	notifier_seq = mmu_interval_read_begin(&userptr->notifier);
	if (notifier_seq == userptr->notifier_seq)
		return 0;

	pages = kvmalloc_array(num_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (userptr->sg) {
		dma_unmap_sgtable(xe->drm.dev,
				  userptr->sg,
				  read_only ? DMA_TO_DEVICE :
				  DMA_BIDIRECTIONAL, 0);
		sg_free_table(userptr->sg);
		userptr->sg = NULL;
	}

	pinned = ret = 0;
	if (in_kthread) {
		if (!mmget_not_zero(userptr->notifier.mm)) {
			ret = -EFAULT;
			goto mm_closed;
		}
		kthread_use_mm(userptr->notifier.mm);
	}

	while (pinned < num_pages) {
		ret = get_user_pages_fast(xe_vma_userptr(vma) +
					  pinned * PAGE_SIZE,
					  num_pages - pinned,
					  read_only ? 0 : FOLL_WRITE,
					  &pages[pinned]);
		if (ret < 0)
			break;

		pinned += ret;
		ret = 0;
	}

	if (in_kthread) {
		kthread_unuse_mm(userptr->notifier.mm);
		mmput(userptr->notifier.mm);
	}
mm_closed:
	if (ret)
		goto out;

	ret = sg_alloc_table_from_pages_segment(&userptr->sgt, pages,
						pinned, 0,
						(u64)pinned << PAGE_SHIFT,
						xe_sg_segment_size(xe->drm.dev),
						GFP_KERNEL);
	if (ret) {
		userptr->sg = NULL;
		goto out;
	}
	userptr->sg = &userptr->sgt;

	ret = dma_map_sgtable(xe->drm.dev, userptr->sg,
			      read_only ? DMA_TO_DEVICE :
			      DMA_BIDIRECTIONAL,
			      DMA_ATTR_SKIP_CPU_SYNC |
			      DMA_ATTR_NO_KERNEL_MAPPING);
	if (ret) {
		sg_free_table(userptr->sg);
		userptr->sg = NULL;
		goto out;
	}

	for (i = 0; i < pinned; ++i) {
		if (!read_only) {
			lock_page(pages[i]);
			set_page_dirty(pages[i]);
			unlock_page(pages[i]);
		}

		mark_page_accessed(pages[i]);
	}

out:
	release_pages(pages, pinned);
	kvfree(pages);

	if (!(ret < 0)) {
		userptr->notifier_seq = notifier_seq;
		if (xe_vma_userptr_check_repin(uvma) == -EAGAIN)
			goto retry;
	}

	return ret < 0 ? ret : 0;
}

static bool preempt_fences_waiting(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (!q->compute.pfence ||
		    (q->compute.pfence && test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
						   &q->compute.pfence->flags))) {
			return true;
		}
	}

	return false;
}

static void free_preempt_fences(struct list_head *list)
{
	struct list_head *link, *next;

	list_for_each_safe(link, next, list)
		xe_preempt_fence_free(to_preempt_fence_from_link(link));
}

static int alloc_preempt_fences(struct xe_vm *vm, struct list_head *list,
				unsigned int *count)
{
	lockdep_assert_held(&vm->lock);
	xe_vm_assert_held(vm);

	if (*count >= vm->preempt.num_exec_queues)
		return 0;

	for (; *count < vm->preempt.num_exec_queues; ++(*count)) {
		struct xe_preempt_fence *pfence = xe_preempt_fence_alloc();

		if (IS_ERR(pfence))
			return PTR_ERR(pfence);

		list_move_tail(xe_preempt_fence_link(pfence), list);
	}

	return 0;
}

static int wait_for_existing_preempt_fences(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	xe_vm_assert_held(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (q->compute.pfence) {
			long timeout = dma_fence_wait(q->compute.pfence, false);

			if (timeout < 0)
				return -ETIME;
			dma_fence_put(q->compute.pfence);
			q->compute.pfence = NULL;
		}
	}

	return 0;
}

static bool xe_vm_is_idle(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	xe_vm_assert_held(vm);
	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		if (!xe_exec_queue_is_idle(q))
			return false;
	}

	return true;
}

static void arm_preempt_fences(struct xe_vm *vm, struct list_head *list)
{
	struct list_head *link;
	struct xe_exec_queue *q;

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) {
		struct dma_fence *fence;

		link = list->next;
		xe_assert(vm->xe, link != list);

		fence = xe_preempt_fence_arm(to_preempt_fence_from_link(link),
					     q, q->compute.context,
					     ++q->compute.seqno);
dma_fence_put(q->compute.pfence); 270 q->compute.pfence = fence; 271 } 272 } 273 274 static int add_preempt_fences(struct xe_vm *vm, struct xe_bo *bo) 275 { 276 struct xe_exec_queue *q; 277 int err; 278 279 if (!vm->preempt.num_exec_queues) 280 return 0; 281 282 err = xe_bo_lock(bo, true); 283 if (err) 284 return err; 285 286 err = dma_resv_reserve_fences(bo->ttm.base.resv, vm->preempt.num_exec_queues); 287 if (err) 288 goto out_unlock; 289 290 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) 291 if (q->compute.pfence) { 292 dma_resv_add_fence(bo->ttm.base.resv, 293 q->compute.pfence, 294 DMA_RESV_USAGE_BOOKKEEP); 295 } 296 297 out_unlock: 298 xe_bo_unlock(bo); 299 return err; 300 } 301 302 static void resume_and_reinstall_preempt_fences(struct xe_vm *vm, 303 struct drm_exec *exec) 304 { 305 struct xe_exec_queue *q; 306 307 lockdep_assert_held(&vm->lock); 308 xe_vm_assert_held(vm); 309 310 list_for_each_entry(q, &vm->preempt.exec_queues, compute.link) { 311 q->ops->resume(q); 312 313 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, q->compute.pfence, 314 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP); 315 } 316 } 317 318 int xe_vm_add_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) 319 { 320 struct drm_gpuvm_exec vm_exec = { 321 .vm = &vm->gpuvm, 322 .flags = DRM_EXEC_INTERRUPTIBLE_WAIT, 323 .num_fences = 1, 324 }; 325 struct drm_exec *exec = &vm_exec.exec; 326 struct dma_fence *pfence; 327 int err; 328 bool wait; 329 330 xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm)); 331 332 down_write(&vm->lock); 333 err = drm_gpuvm_exec_lock(&vm_exec); 334 if (err) 335 goto out_up_write; 336 337 pfence = xe_preempt_fence_create(q, q->compute.context, 338 ++q->compute.seqno); 339 if (!pfence) { 340 err = -ENOMEM; 341 goto out_fini; 342 } 343 344 list_add(&q->compute.link, &vm->preempt.exec_queues); 345 ++vm->preempt.num_exec_queues; 346 q->compute.pfence = pfence; 347 348 down_read(&vm->userptr.notifier_lock); 349 350 drm_gpuvm_resv_add_fence(&vm->gpuvm, exec, pfence, 351 DMA_RESV_USAGE_BOOKKEEP, DMA_RESV_USAGE_BOOKKEEP); 352 353 /* 354 * Check to see if a preemption on VM is in flight or userptr 355 * invalidation, if so trigger this preempt fence to sync state with 356 * other preempt fences on the VM. 357 */ 358 wait = __xe_vm_userptr_needs_repin(vm) || preempt_fences_waiting(vm); 359 if (wait) 360 dma_fence_enable_sw_signaling(pfence); 361 362 up_read(&vm->userptr.notifier_lock); 363 364 out_fini: 365 drm_exec_fini(exec); 366 out_up_write: 367 up_write(&vm->lock); 368 369 return err; 370 } 371 372 /** 373 * xe_vm_remove_compute_exec_queue() - Remove compute exec queue from VM 374 * @vm: The VM. 375 * @q: The exec_queue 376 */ 377 void xe_vm_remove_compute_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) 378 { 379 if (!xe_vm_in_preempt_fence_mode(vm)) 380 return; 381 382 down_write(&vm->lock); 383 list_del(&q->compute.link); 384 --vm->preempt.num_exec_queues; 385 if (q->compute.pfence) { 386 dma_fence_enable_sw_signaling(q->compute.pfence); 387 dma_fence_put(q->compute.pfence); 388 q->compute.pfence = NULL; 389 } 390 up_write(&vm->lock); 391 } 392 393 /** 394 * __xe_vm_userptr_needs_repin() - Check whether the VM does have userptrs 395 * that need repinning. 396 * @vm: The VM. 397 * 398 * This function checks for whether the VM has userptrs that need repinning, 399 * and provides a release-type barrier on the userptr.notifier_lock after 400 * checking. 401 * 402 * Return: 0 if there are no userptrs needing repinning, -EAGAIN if there are. 
 */
int __xe_vm_userptr_needs_repin(struct xe_vm *vm)
{
	lockdep_assert_held_read(&vm->userptr.notifier_lock);

	return (list_empty(&vm->userptr.repin_list) &&
		list_empty(&vm->userptr.invalidated)) ? 0 : -EAGAIN;
}

#define XE_VM_REBIND_RETRY_TIMEOUT_MS 1000

static void xe_vm_kill(struct xe_vm *vm)
{
	struct xe_exec_queue *q;

	lockdep_assert_held(&vm->lock);

	xe_vm_lock(vm, false);
	vm->flags |= XE_VM_FLAG_BANNED;
	trace_xe_vm_kill(vm);

	list_for_each_entry(q, &vm->preempt.exec_queues, compute.link)
		q->ops->kill(q);
	xe_vm_unlock(vm);

	/* TODO: Inform user the VM is banned */
}

/**
 * xe_vm_validate_should_retry() - Whether to retry after a validate error.
 * @exec: The drm_exec object used for locking before validation.
 * @err: The error returned from ttm_bo_validate().
 * @end: A ktime_t cookie that should be set to 0 before first use and
 * that should be reused on subsequent calls.
 *
 * With multiple active VMs, under memory pressure, it is possible that
 * ttm_bo_validate() runs into -EDEADLK and in such a case returns -ENOMEM.
 * Until ttm properly handles locking in such scenarios, the best thing the
 * driver can do is retry with a timeout. Check if that is necessary, and
 * if so unlock the drm_exec's objects while keeping the ticket to prepare
 * for a rerun.
 *
 * Return: true if a retry after drm_exec_init() is recommended;
 * false otherwise.
 */
bool xe_vm_validate_should_retry(struct drm_exec *exec, int err, ktime_t *end)
{
	ktime_t cur;

	if (err != -ENOMEM)
		return false;

	cur = ktime_get();
	*end = *end ? : ktime_add_ms(cur, XE_VM_REBIND_RETRY_TIMEOUT_MS);
	if (!ktime_before(cur, *end))
		return false;

	msleep(20);
	return true;
}
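
/*
 * A minimal sketch of the retry protocol described above (illustrative
 * only; lock_and_validate() stands in for the caller's own locking and
 * validation work, it is not a real helper):
 *
 *	ktime_t end = 0;
 *	int err;
 *
 * retry:
 *	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
 *	err = lock_and_validate(&exec);
 *	drm_exec_fini(&exec);
 *	if (err && xe_vm_validate_should_retry(&exec, err, &end))
 *		goto retry;
 *
 * preempt_rebind_work_func() below follows the same pattern.
 */
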
static int xe_gpuvm_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec)
{
	struct xe_vm *vm = gpuvm_to_vm(vm_bo->vm);
	struct drm_gpuva *gpuva;
	int ret;

	lockdep_assert_held(&vm->lock);
	drm_gpuvm_bo_for_each_va(gpuva, vm_bo)
		list_move_tail(&gpuva_to_vma(gpuva)->combined_links.rebind,
			       &vm->rebind_list);

	ret = xe_bo_validate(gem_to_xe_bo(vm_bo->obj), vm, false);
	if (ret)
		return ret;

	vm_bo->evicted = false;
	return 0;
}

static int xe_preempt_work_begin(struct drm_exec *exec, struct xe_vm *vm,
				 bool *done)
{
	int err;

	/*
	 * 1 fence for each preempt fence plus a fence for each tile from a
	 * possible rebind
	 */
	err = drm_gpuvm_prepare_vm(&vm->gpuvm, exec, vm->preempt.num_exec_queues +
				   vm->xe->info.tile_count);
	if (err)
		return err;

	if (xe_vm_is_idle(vm)) {
		vm->preempt.rebind_deactivated = true;
		*done = true;
		return 0;
	}

	if (!preempt_fences_waiting(vm)) {
		*done = true;
		return 0;
	}

	err = drm_gpuvm_prepare_objects(&vm->gpuvm, exec, vm->preempt.num_exec_queues);
	if (err)
		return err;

	err = wait_for_existing_preempt_fences(vm);
	if (err)
		return err;

	return drm_gpuvm_validate(&vm->gpuvm, exec);
}

static void preempt_rebind_work_func(struct work_struct *w)
{
	struct xe_vm *vm = container_of(w, struct xe_vm, preempt.rebind_work);
	struct drm_exec exec;
	struct dma_fence *rebind_fence;
	unsigned int fence_count = 0;
	LIST_HEAD(preempt_fences);
	ktime_t end = 0;
	int err = 0;
	long wait;
	int __maybe_unused tries = 0;

	xe_assert(vm->xe, xe_vm_in_preempt_fence_mode(vm));
	trace_xe_vm_rebind_worker_enter(vm);

	down_write(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm)) {
		up_write(&vm->lock);
		trace_xe_vm_rebind_worker_exit(vm);
		return;
	}

retry:
	if (xe_vm_userptr_check_repin(vm)) {
		err = xe_vm_userptr_pin(vm);
		if (err)
			goto out_unlock_outer;
	}

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);

	drm_exec_until_all_locked(&exec) {
		bool done = false;

		err = xe_preempt_work_begin(&exec, vm, &done);
		drm_exec_retry_on_contention(&exec);
		if (err || done) {
			drm_exec_fini(&exec);
			if (err && xe_vm_validate_should_retry(&exec, err, &end))
				err = -EAGAIN;

			goto out_unlock_outer;
		}
	}

	err = alloc_preempt_fences(vm, &preempt_fences, &fence_count);
	if (err)
		goto out_unlock;

	rebind_fence = xe_vm_rebind(vm, true);
	if (IS_ERR(rebind_fence)) {
		err = PTR_ERR(rebind_fence);
		goto out_unlock;
	}

	if (rebind_fence) {
		dma_fence_wait(rebind_fence, false);
		dma_fence_put(rebind_fence);
	}

	/* Wait on munmap style VM unbinds */
	wait = dma_resv_wait_timeout(xe_vm_resv(vm),
				     DMA_RESV_USAGE_KERNEL,
				     false, MAX_SCHEDULE_TIMEOUT);
	if (wait <= 0) {
		err = -ETIME;
		goto out_unlock;
	}

#define retry_required(__tries, __vm) \
	(IS_ENABLED(CONFIG_DRM_XE_USERPTR_INVAL_INJECT) ? \
	(!(__tries)++ || __xe_vm_userptr_needs_repin(__vm)) : \
	__xe_vm_userptr_needs_repin(__vm))

	down_read(&vm->userptr.notifier_lock);
	if (retry_required(tries, vm)) {
		up_read(&vm->userptr.notifier_lock);
		err = -EAGAIN;
		goto out_unlock;
	}

#undef retry_required

	spin_lock(&vm->xe->ttm.lru_lock);
	ttm_lru_bulk_move_tail(&vm->lru_bulk_move);
	spin_unlock(&vm->xe->ttm.lru_lock);

	/* Point of no return. */
	arm_preempt_fences(vm, &preempt_fences);
	resume_and_reinstall_preempt_fences(vm, &exec);
	up_read(&vm->userptr.notifier_lock);

out_unlock:
	drm_exec_fini(&exec);
out_unlock_outer:
	if (err == -EAGAIN) {
		trace_xe_vm_rebind_worker_retry(vm);
		goto retry;
	}

	if (err) {
		drm_warn(&vm->xe->drm, "VM worker error: %d\n", err);
		xe_vm_kill(vm);
	}
	up_write(&vm->lock);

	free_preempt_fences(&preempt_fences);

	trace_xe_vm_rebind_worker_exit(vm);
}
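
/*
 * MMU interval notifier callback for userptr VMAs: bump the notifier
 * sequence number under the notifier_lock, queue the userptr on
 * vm->userptr.invalidated so the exec and rebind paths repin it, and then
 * stop ongoing GPU access by enabling signaling on, and waiting for, the
 * VM's BOOKKEEP fences.
 */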
static bool vma_userptr_invalidate(struct mmu_interval_notifier *mni,
				   const struct mmu_notifier_range *range,
				   unsigned long cur_seq)
{
	struct xe_userptr *userptr = container_of(mni, typeof(*userptr), notifier);
	struct xe_userptr_vma *uvma = container_of(userptr, typeof(*uvma), userptr);
	struct xe_vma *vma = &uvma->vma;
	struct xe_vm *vm = xe_vma_vm(vma);
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	long err;

	xe_assert(vm->xe, xe_vma_is_userptr(vma));
	trace_xe_vma_userptr_invalidate(vma);

	if (!mmu_notifier_range_blockable(range))
		return false;

	down_write(&vm->userptr.notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);

	/* No need to stop gpu access if the userptr is not yet bound. */
	if (!userptr->initial_bind) {
		up_write(&vm->userptr.notifier_lock);
		return true;
	}

	/*
	 * Tell exec and rebind worker they need to repin and rebind this
	 * userptr.
	 */
	if (!xe_vm_in_fault_mode(vm) &&
	    !(vma->gpuva.flags & XE_VMA_DESTROYED) && vma->tile_present) {
		spin_lock(&vm->userptr.invalidated_lock);
		list_move_tail(&userptr->invalidate_link,
			       &vm->userptr.invalidated);
		spin_unlock(&vm->userptr.invalidated_lock);
	}

	up_write(&vm->userptr.notifier_lock);

	/*
	 * Preempt fences turn into schedule disables, pipeline these.
	 * Note that even in fault mode, we need to wait for binds and
	 * unbinds to complete, and those are attached as BOOKKEEP fences
	 * to the vm.
	 */
	dma_resv_iter_begin(&cursor, xe_vm_resv(vm),
			    DMA_RESV_USAGE_BOOKKEEP);
	dma_resv_for_each_fence_unlocked(&cursor, fence)
		dma_fence_enable_sw_signaling(fence);
	dma_resv_iter_end(&cursor);

	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	if (xe_vm_in_fault_mode(vm)) {
		err = xe_vm_invalidate_vma(vma);
		XE_WARN_ON(err);
	}

	trace_xe_vma_userptr_invalidate_complete(vma);

	return true;
}

static const struct mmu_interval_notifier_ops vma_userptr_notifier_ops = {
	.invalidate = vma_userptr_invalidate,
};

int xe_vm_userptr_pin(struct xe_vm *vm)
{
	struct xe_userptr_vma *uvma, *next;
	int err = 0;
	LIST_HEAD(tmp_evict);

	lockdep_assert_held_write(&vm->lock);

	/* Collect invalidated userptrs */
	spin_lock(&vm->userptr.invalidated_lock);
	list_for_each_entry_safe(uvma, next, &vm->userptr.invalidated,
				 userptr.invalidate_link) {
		list_del_init(&uvma->userptr.invalidate_link);
		list_move_tail(&uvma->userptr.repin_link,
			       &vm->userptr.repin_list);
	}
	spin_unlock(&vm->userptr.invalidated_lock);

	/* Pin and move to temporary list */
	list_for_each_entry_safe(uvma, next, &vm->userptr.repin_list,
				 userptr.repin_link) {
		err = xe_vma_userptr_pin_pages(uvma);
		if (err < 0)
			return err;

		list_del_init(&uvma->userptr.repin_link);
		list_move_tail(&uvma->vma.combined_links.rebind, &vm->rebind_list);
	}

	return 0;
}

/**
 * xe_vm_userptr_check_repin() - Check whether the VM might have userptrs
 * that need repinning.
 * @vm: The VM.
 *
 * This function does an advisory check for whether the VM has userptrs that
 * need repinning.
 *
 * Return: 0 if there are no indications of userptrs needing repinning,
 * -EAGAIN if there are.
 */
int xe_vm_userptr_check_repin(struct xe_vm *vm)
{
	return (list_empty_careful(&vm->userptr.repin_list) &&
		list_empty_careful(&vm->userptr.invalidated)) ?
0 : -EAGAIN; 750 } 751 752 static struct dma_fence * 753 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q, 754 struct xe_sync_entry *syncs, u32 num_syncs, 755 bool first_op, bool last_op); 756 757 struct dma_fence *xe_vm_rebind(struct xe_vm *vm, bool rebind_worker) 758 { 759 struct dma_fence *fence = NULL; 760 struct xe_vma *vma, *next; 761 762 lockdep_assert_held(&vm->lock); 763 if (xe_vm_in_lr_mode(vm) && !rebind_worker) 764 return NULL; 765 766 xe_vm_assert_held(vm); 767 list_for_each_entry_safe(vma, next, &vm->rebind_list, 768 combined_links.rebind) { 769 xe_assert(vm->xe, vma->tile_present); 770 771 list_del_init(&vma->combined_links.rebind); 772 dma_fence_put(fence); 773 if (rebind_worker) 774 trace_xe_vma_rebind_worker(vma); 775 else 776 trace_xe_vma_rebind_exec(vma); 777 fence = xe_vm_bind_vma(vma, NULL, NULL, 0, false, false); 778 if (IS_ERR(fence)) 779 return fence; 780 } 781 782 return fence; 783 } 784 785 static void xe_vma_free(struct xe_vma *vma) 786 { 787 if (xe_vma_is_userptr(vma)) 788 kfree(to_userptr_vma(vma)); 789 else 790 kfree(vma); 791 } 792 793 #define VMA_CREATE_FLAG_READ_ONLY BIT(0) 794 #define VMA_CREATE_FLAG_IS_NULL BIT(1) 795 796 static struct xe_vma *xe_vma_create(struct xe_vm *vm, 797 struct xe_bo *bo, 798 u64 bo_offset_or_userptr, 799 u64 start, u64 end, 800 u16 pat_index, unsigned int flags) 801 { 802 struct xe_vma *vma; 803 struct xe_tile *tile; 804 u8 id; 805 bool read_only = (flags & VMA_CREATE_FLAG_READ_ONLY); 806 bool is_null = (flags & VMA_CREATE_FLAG_IS_NULL); 807 808 xe_assert(vm->xe, start < end); 809 xe_assert(vm->xe, end < vm->size); 810 811 /* 812 * Allocate and ensure that the xe_vma_is_userptr() return 813 * matches what was allocated. 814 */ 815 if (!bo && !is_null) { 816 struct xe_userptr_vma *uvma = kzalloc(sizeof(*uvma), GFP_KERNEL); 817 818 if (!uvma) 819 return ERR_PTR(-ENOMEM); 820 821 vma = &uvma->vma; 822 } else { 823 vma = kzalloc(sizeof(*vma), GFP_KERNEL); 824 if (!vma) 825 return ERR_PTR(-ENOMEM); 826 827 if (is_null) 828 vma->gpuva.flags |= DRM_GPUVA_SPARSE; 829 if (bo) 830 vma->gpuva.gem.obj = &bo->ttm.base; 831 } 832 833 INIT_LIST_HEAD(&vma->combined_links.rebind); 834 835 INIT_LIST_HEAD(&vma->gpuva.gem.entry); 836 vma->gpuva.vm = &vm->gpuvm; 837 vma->gpuva.va.addr = start; 838 vma->gpuva.va.range = end - start + 1; 839 if (read_only) 840 vma->gpuva.flags |= XE_VMA_READ_ONLY; 841 842 for_each_tile(tile, vm->xe, id) 843 vma->tile_mask |= 0x1 << id; 844 845 if (GRAPHICS_VER(vm->xe) >= 20 || vm->xe->info.platform == XE_PVC) 846 vma->gpuva.flags |= XE_VMA_ATOMIC_PTE_BIT; 847 848 vma->pat_index = pat_index; 849 850 if (bo) { 851 struct drm_gpuvm_bo *vm_bo; 852 853 xe_bo_assert_held(bo); 854 855 vm_bo = drm_gpuvm_bo_obtain(vma->gpuva.vm, &bo->ttm.base); 856 if (IS_ERR(vm_bo)) { 857 xe_vma_free(vma); 858 return ERR_CAST(vm_bo); 859 } 860 861 drm_gpuvm_bo_extobj_add(vm_bo); 862 drm_gem_object_get(&bo->ttm.base); 863 vma->gpuva.gem.offset = bo_offset_or_userptr; 864 drm_gpuva_link(&vma->gpuva, vm_bo); 865 drm_gpuvm_bo_put(vm_bo); 866 } else /* userptr or null */ { 867 if (!is_null) { 868 struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr; 869 u64 size = end - start + 1; 870 int err; 871 872 INIT_LIST_HEAD(&userptr->invalidate_link); 873 INIT_LIST_HEAD(&userptr->repin_link); 874 vma->gpuva.gem.offset = bo_offset_or_userptr; 875 876 err = mmu_interval_notifier_insert(&userptr->notifier, 877 current->mm, 878 xe_vma_userptr(vma), size, 879 &vma_userptr_notifier_ops); 880 if (err) { 881 xe_vma_free(vma); 882 return 
ERR_PTR(err);
			}

			userptr->notifier_seq = LONG_MAX;
		}

		xe_vm_get(vm);
	}

	return vma;
}

static void xe_vma_destroy_late(struct xe_vma *vma)
{
	struct xe_vm *vm = xe_vma_vm(vma);
	struct xe_device *xe = vm->xe;
	bool read_only = xe_vma_read_only(vma);

	if (xe_vma_is_userptr(vma)) {
		struct xe_userptr *userptr = &to_userptr_vma(vma)->userptr;

		if (userptr->sg) {
			dma_unmap_sgtable(xe->drm.dev,
					  userptr->sg,
					  read_only ? DMA_TO_DEVICE :
					  DMA_BIDIRECTIONAL, 0);
			sg_free_table(userptr->sg);
			userptr->sg = NULL;
		}

		/*
		 * Since userptr pages are not pinned, we can't remove
		 * the notifier until we're sure the GPU is not accessing
		 * them anymore
		 */
		mmu_interval_notifier_remove(&userptr->notifier);
		xe_vm_put(vm);
	} else if (xe_vma_is_null(vma)) {
		xe_vm_put(vm);
	} else {
		xe_bo_put(xe_vma_bo(vma));
	}

	xe_vma_free(vma);
}

static void vma_destroy_work_func(struct work_struct *w)
{
	struct xe_vma *vma =
		container_of(w, struct xe_vma, destroy_work);

	xe_vma_destroy_late(vma);
}

static void vma_destroy_cb(struct dma_fence *fence,
			   struct dma_fence_cb *cb)
{
	struct xe_vma *vma = container_of(cb, struct xe_vma, destroy_cb);

	INIT_WORK(&vma->destroy_work, vma_destroy_work_func);
	queue_work(system_unbound_wq, &vma->destroy_work);
}

static void xe_vma_destroy(struct xe_vma *vma, struct dma_fence *fence)
{
	struct xe_vm *vm = xe_vma_vm(vma);

	lockdep_assert_held_write(&vm->lock);
	xe_assert(vm->xe, list_empty(&vma->combined_links.destroy));

	if (xe_vma_is_userptr(vma)) {
		xe_assert(vm->xe, vma->gpuva.flags & XE_VMA_DESTROYED);

		spin_lock(&vm->userptr.invalidated_lock);
		list_del(&to_userptr_vma(vma)->userptr.invalidate_link);
		spin_unlock(&vm->userptr.invalidated_lock);
	} else if (!xe_vma_is_null(vma)) {
		xe_bo_assert_held(xe_vma_bo(vma));

		drm_gpuva_unlink(&vma->gpuva);
	}

	xe_vm_assert_held(vm);
	if (fence) {
		int ret = dma_fence_add_callback(fence, &vma->destroy_cb,
						 vma_destroy_cb);

		if (ret) {
			XE_WARN_ON(ret != -ENOENT);
			xe_vma_destroy_late(vma);
		}
	} else {
		xe_vma_destroy_late(vma);
	}
}

/**
 * xe_vm_prepare_vma() - drm_exec utility to lock a vma
 * @exec: The drm_exec object we're currently locking for.
 * @vma: The vma for which we want to lock the vm resv and any attached
 * object's resv.
 * @num_shared: The number of dma-fence slots to pre-allocate in the
 * objects' reservation objects.
 *
 * Return: 0 on success, negative error code on error. In particular
 * may return -EDEADLK on WW transaction contention and -EINTR if
 * an interruptible wait is terminated by a signal.
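 *
 * A typical caller wraps this in a drm_exec_until_all_locked() loop and
 * retries on contention; a sketch following xe_vma_destroy_unlocked()
 * below:
 *
 *	drm_exec_init(&exec, 0, 0);
 *	drm_exec_until_all_locked(&exec) {
 *		err = xe_vm_prepare_vma(&exec, vma, 0);
 *		drm_exec_retry_on_contention(&exec);
 *		if (err)
 *			break;
 *	}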
989 */ 990 int xe_vm_prepare_vma(struct drm_exec *exec, struct xe_vma *vma, 991 unsigned int num_shared) 992 { 993 struct xe_vm *vm = xe_vma_vm(vma); 994 struct xe_bo *bo = xe_vma_bo(vma); 995 int err; 996 997 XE_WARN_ON(!vm); 998 if (num_shared) 999 err = drm_exec_prepare_obj(exec, xe_vm_obj(vm), num_shared); 1000 else 1001 err = drm_exec_lock_obj(exec, xe_vm_obj(vm)); 1002 if (!err && bo && !bo->vm) { 1003 if (num_shared) 1004 err = drm_exec_prepare_obj(exec, &bo->ttm.base, num_shared); 1005 else 1006 err = drm_exec_lock_obj(exec, &bo->ttm.base); 1007 } 1008 1009 return err; 1010 } 1011 1012 static void xe_vma_destroy_unlocked(struct xe_vma *vma) 1013 { 1014 struct drm_exec exec; 1015 int err; 1016 1017 drm_exec_init(&exec, 0, 0); 1018 drm_exec_until_all_locked(&exec) { 1019 err = xe_vm_prepare_vma(&exec, vma, 0); 1020 drm_exec_retry_on_contention(&exec); 1021 if (XE_WARN_ON(err)) 1022 break; 1023 } 1024 1025 xe_vma_destroy(vma, NULL); 1026 1027 drm_exec_fini(&exec); 1028 } 1029 1030 struct xe_vma * 1031 xe_vm_find_overlapping_vma(struct xe_vm *vm, u64 start, u64 range) 1032 { 1033 struct drm_gpuva *gpuva; 1034 1035 lockdep_assert_held(&vm->lock); 1036 1037 if (xe_vm_is_closed_or_banned(vm)) 1038 return NULL; 1039 1040 xe_assert(vm->xe, start + range <= vm->size); 1041 1042 gpuva = drm_gpuva_find_first(&vm->gpuvm, start, range); 1043 1044 return gpuva ? gpuva_to_vma(gpuva) : NULL; 1045 } 1046 1047 static int xe_vm_insert_vma(struct xe_vm *vm, struct xe_vma *vma) 1048 { 1049 int err; 1050 1051 xe_assert(vm->xe, xe_vma_vm(vma) == vm); 1052 lockdep_assert_held(&vm->lock); 1053 1054 err = drm_gpuva_insert(&vm->gpuvm, &vma->gpuva); 1055 XE_WARN_ON(err); /* Shouldn't be possible */ 1056 1057 return err; 1058 } 1059 1060 static void xe_vm_remove_vma(struct xe_vm *vm, struct xe_vma *vma) 1061 { 1062 xe_assert(vm->xe, xe_vma_vm(vma) == vm); 1063 lockdep_assert_held(&vm->lock); 1064 1065 drm_gpuva_remove(&vma->gpuva); 1066 if (vm->usm.last_fault_vma == vma) 1067 vm->usm.last_fault_vma = NULL; 1068 } 1069 1070 static struct drm_gpuva_op *xe_vm_op_alloc(void) 1071 { 1072 struct xe_vma_op *op; 1073 1074 op = kzalloc(sizeof(*op), GFP_KERNEL); 1075 1076 if (unlikely(!op)) 1077 return NULL; 1078 1079 return &op->base; 1080 } 1081 1082 static void xe_vm_free(struct drm_gpuvm *gpuvm); 1083 1084 static struct drm_gpuvm_ops gpuvm_ops = { 1085 .op_alloc = xe_vm_op_alloc, 1086 .vm_bo_validate = xe_gpuvm_validate, 1087 .vm_free = xe_vm_free, 1088 }; 1089 1090 static u64 pde_encode_pat_index(struct xe_device *xe, u16 pat_index) 1091 { 1092 u64 pte = 0; 1093 1094 if (pat_index & BIT(0)) 1095 pte |= XE_PPGTT_PTE_PAT0; 1096 1097 if (pat_index & BIT(1)) 1098 pte |= XE_PPGTT_PTE_PAT1; 1099 1100 return pte; 1101 } 1102 1103 static u64 pte_encode_pat_index(struct xe_device *xe, u16 pat_index, 1104 u32 pt_level) 1105 { 1106 u64 pte = 0; 1107 1108 if (pat_index & BIT(0)) 1109 pte |= XE_PPGTT_PTE_PAT0; 1110 1111 if (pat_index & BIT(1)) 1112 pte |= XE_PPGTT_PTE_PAT1; 1113 1114 if (pat_index & BIT(2)) { 1115 if (pt_level) 1116 pte |= XE_PPGTT_PDE_PDPE_PAT2; 1117 else 1118 pte |= XE_PPGTT_PTE_PAT2; 1119 } 1120 1121 if (pat_index & BIT(3)) 1122 pte |= XELPG_PPGTT_PTE_PAT3; 1123 1124 if (pat_index & (BIT(4))) 1125 pte |= XE2_PPGTT_PTE_PAT4; 1126 1127 return pte; 1128 } 1129 1130 static u64 pte_encode_ps(u32 pt_level) 1131 { 1132 XE_WARN_ON(pt_level > MAX_HUGEPTE_LEVEL); 1133 1134 if (pt_level == 1) 1135 return XE_PDE_PS_2M; 1136 else if (pt_level == 2) 1137 return XE_PDPE_PS_1G; 1138 1139 return 0; 1140 } 1141 1142 static u64 
xelp_pde_encode_bo(struct xe_bo *bo, u64 bo_offset, 1143 const u16 pat_index) 1144 { 1145 struct xe_device *xe = xe_bo_device(bo); 1146 u64 pde; 1147 1148 pde = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE); 1149 pde |= XE_PAGE_PRESENT | XE_PAGE_RW; 1150 pde |= pde_encode_pat_index(xe, pat_index); 1151 1152 return pde; 1153 } 1154 1155 static u64 xelp_pte_encode_bo(struct xe_bo *bo, u64 bo_offset, 1156 u16 pat_index, u32 pt_level) 1157 { 1158 struct xe_device *xe = xe_bo_device(bo); 1159 u64 pte; 1160 1161 pte = xe_bo_addr(bo, bo_offset, XE_PAGE_SIZE); 1162 pte |= XE_PAGE_PRESENT | XE_PAGE_RW; 1163 pte |= pte_encode_pat_index(xe, pat_index, pt_level); 1164 pte |= pte_encode_ps(pt_level); 1165 1166 if (xe_bo_is_vram(bo) || xe_bo_is_stolen_devmem(bo)) 1167 pte |= XE_PPGTT_PTE_DM; 1168 1169 return pte; 1170 } 1171 1172 static u64 xelp_pte_encode_vma(u64 pte, struct xe_vma *vma, 1173 u16 pat_index, u32 pt_level) 1174 { 1175 struct xe_device *xe = xe_vma_vm(vma)->xe; 1176 1177 pte |= XE_PAGE_PRESENT; 1178 1179 if (likely(!xe_vma_read_only(vma))) 1180 pte |= XE_PAGE_RW; 1181 1182 pte |= pte_encode_pat_index(xe, pat_index, pt_level); 1183 pte |= pte_encode_ps(pt_level); 1184 1185 if (unlikely(xe_vma_is_null(vma))) 1186 pte |= XE_PTE_NULL; 1187 1188 return pte; 1189 } 1190 1191 static u64 xelp_pte_encode_addr(struct xe_device *xe, u64 addr, 1192 u16 pat_index, 1193 u32 pt_level, bool devmem, u64 flags) 1194 { 1195 u64 pte; 1196 1197 /* Avoid passing random bits directly as flags */ 1198 xe_assert(xe, !(flags & ~XE_PTE_PS64)); 1199 1200 pte = addr; 1201 pte |= XE_PAGE_PRESENT | XE_PAGE_RW; 1202 pte |= pte_encode_pat_index(xe, pat_index, pt_level); 1203 pte |= pte_encode_ps(pt_level); 1204 1205 if (devmem) 1206 pte |= XE_PPGTT_PTE_DM; 1207 1208 pte |= flags; 1209 1210 return pte; 1211 } 1212 1213 static const struct xe_pt_ops xelp_pt_ops = { 1214 .pte_encode_bo = xelp_pte_encode_bo, 1215 .pte_encode_vma = xelp_pte_encode_vma, 1216 .pte_encode_addr = xelp_pte_encode_addr, 1217 .pde_encode_bo = xelp_pde_encode_bo, 1218 }; 1219 1220 static void vm_destroy_work_func(struct work_struct *w); 1221 1222 /** 1223 * xe_vm_create_scratch() - Setup a scratch memory pagetable tree for the 1224 * given tile and vm. 1225 * @xe: xe device. 1226 * @tile: tile to set up for. 1227 * @vm: vm to set up for. 1228 * 1229 * Sets up a pagetable tree with one page-table per level and a single 1230 * leaf PTE. All pagetable entries point to the single page-table or, 1231 * for MAX_HUGEPTE_LEVEL, a NULL huge PTE returning 0 on read and 1232 * writes become NOPs. 1233 * 1234 * Return: 0 on success, negative error code on error. 
1235 */ 1236 static int xe_vm_create_scratch(struct xe_device *xe, struct xe_tile *tile, 1237 struct xe_vm *vm) 1238 { 1239 u8 id = tile->id; 1240 int i; 1241 1242 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; i++) { 1243 vm->scratch_pt[id][i] = xe_pt_create(vm, tile, i); 1244 if (IS_ERR(vm->scratch_pt[id][i])) 1245 return PTR_ERR(vm->scratch_pt[id][i]); 1246 1247 xe_pt_populate_empty(tile, vm, vm->scratch_pt[id][i]); 1248 } 1249 1250 return 0; 1251 } 1252 1253 static void xe_vm_free_scratch(struct xe_vm *vm) 1254 { 1255 struct xe_tile *tile; 1256 u8 id; 1257 1258 if (!xe_vm_has_scratch(vm)) 1259 return; 1260 1261 for_each_tile(tile, vm->xe, id) { 1262 u32 i; 1263 1264 if (!vm->pt_root[id]) 1265 continue; 1266 1267 for (i = MAX_HUGEPTE_LEVEL; i < vm->pt_root[id]->level; ++i) 1268 if (vm->scratch_pt[id][i]) 1269 xe_pt_destroy(vm->scratch_pt[id][i], vm->flags, NULL); 1270 } 1271 } 1272 1273 struct xe_vm *xe_vm_create(struct xe_device *xe, u32 flags) 1274 { 1275 struct drm_gem_object *vm_resv_obj; 1276 struct xe_vm *vm; 1277 int err, number_tiles = 0; 1278 struct xe_tile *tile; 1279 u8 id; 1280 1281 vm = kzalloc(sizeof(*vm), GFP_KERNEL); 1282 if (!vm) 1283 return ERR_PTR(-ENOMEM); 1284 1285 vm->xe = xe; 1286 1287 vm->size = 1ull << xe->info.va_bits; 1288 1289 vm->flags = flags; 1290 1291 init_rwsem(&vm->lock); 1292 1293 INIT_LIST_HEAD(&vm->rebind_list); 1294 1295 INIT_LIST_HEAD(&vm->userptr.repin_list); 1296 INIT_LIST_HEAD(&vm->userptr.invalidated); 1297 init_rwsem(&vm->userptr.notifier_lock); 1298 spin_lock_init(&vm->userptr.invalidated_lock); 1299 1300 INIT_WORK(&vm->destroy_work, vm_destroy_work_func); 1301 1302 INIT_LIST_HEAD(&vm->preempt.exec_queues); 1303 vm->preempt.min_run_period_ms = 10; /* FIXME: Wire up to uAPI */ 1304 1305 for_each_tile(tile, xe, id) 1306 xe_range_fence_tree_init(&vm->rftree[id]); 1307 1308 vm->pt_ops = &xelp_pt_ops; 1309 1310 if (!(flags & XE_VM_FLAG_MIGRATION)) 1311 xe_device_mem_access_get(xe); 1312 1313 vm_resv_obj = drm_gpuvm_resv_object_alloc(&xe->drm); 1314 if (!vm_resv_obj) { 1315 err = -ENOMEM; 1316 goto err_no_resv; 1317 } 1318 1319 drm_gpuvm_init(&vm->gpuvm, "Xe VM", DRM_GPUVM_RESV_PROTECTED, &xe->drm, 1320 vm_resv_obj, 0, vm->size, 0, 0, &gpuvm_ops); 1321 1322 drm_gem_object_put(vm_resv_obj); 1323 1324 err = dma_resv_lock_interruptible(xe_vm_resv(vm), NULL); 1325 if (err) 1326 goto err_close; 1327 1328 if (IS_DGFX(xe) && xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) 1329 vm->flags |= XE_VM_FLAG_64K; 1330 1331 for_each_tile(tile, xe, id) { 1332 if (flags & XE_VM_FLAG_MIGRATION && 1333 tile->id != XE_VM_FLAG_TILE_ID(flags)) 1334 continue; 1335 1336 vm->pt_root[id] = xe_pt_create(vm, tile, xe->info.vm_max_level); 1337 if (IS_ERR(vm->pt_root[id])) { 1338 err = PTR_ERR(vm->pt_root[id]); 1339 vm->pt_root[id] = NULL; 1340 goto err_unlock_close; 1341 } 1342 } 1343 1344 if (xe_vm_has_scratch(vm)) { 1345 for_each_tile(tile, xe, id) { 1346 if (!vm->pt_root[id]) 1347 continue; 1348 1349 err = xe_vm_create_scratch(xe, tile, vm); 1350 if (err) 1351 goto err_unlock_close; 1352 } 1353 vm->batch_invalidate_tlb = true; 1354 } 1355 1356 if (flags & XE_VM_FLAG_LR_MODE) { 1357 INIT_WORK(&vm->preempt.rebind_work, preempt_rebind_work_func); 1358 vm->flags |= XE_VM_FLAG_LR_MODE; 1359 vm->batch_invalidate_tlb = false; 1360 } 1361 1362 /* Fill pt_root after allocating scratch tables */ 1363 for_each_tile(tile, xe, id) { 1364 if (!vm->pt_root[id]) 1365 continue; 1366 1367 xe_pt_populate_empty(tile, vm, vm->pt_root[id]); 1368 } 1369 dma_resv_unlock(xe_vm_resv(vm)); 1370 
1371 /* Kernel migration VM shouldn't have a circular loop.. */ 1372 if (!(flags & XE_VM_FLAG_MIGRATION)) { 1373 for_each_tile(tile, xe, id) { 1374 struct xe_gt *gt = tile->primary_gt; 1375 struct xe_vm *migrate_vm; 1376 struct xe_exec_queue *q; 1377 u32 create_flags = EXEC_QUEUE_FLAG_VM; 1378 1379 if (!vm->pt_root[id]) 1380 continue; 1381 1382 migrate_vm = xe_migrate_get_vm(tile->migrate); 1383 q = xe_exec_queue_create_class(xe, gt, migrate_vm, 1384 XE_ENGINE_CLASS_COPY, 1385 create_flags); 1386 xe_vm_put(migrate_vm); 1387 if (IS_ERR(q)) { 1388 err = PTR_ERR(q); 1389 goto err_close; 1390 } 1391 vm->q[id] = q; 1392 number_tiles++; 1393 } 1394 } 1395 1396 if (number_tiles > 1) 1397 vm->composite_fence_ctx = dma_fence_context_alloc(1); 1398 1399 mutex_lock(&xe->usm.lock); 1400 if (flags & XE_VM_FLAG_FAULT_MODE) 1401 xe->usm.num_vm_in_fault_mode++; 1402 else if (!(flags & XE_VM_FLAG_MIGRATION)) 1403 xe->usm.num_vm_in_non_fault_mode++; 1404 mutex_unlock(&xe->usm.lock); 1405 1406 trace_xe_vm_create(vm); 1407 1408 return vm; 1409 1410 err_unlock_close: 1411 dma_resv_unlock(xe_vm_resv(vm)); 1412 err_close: 1413 xe_vm_close_and_put(vm); 1414 return ERR_PTR(err); 1415 1416 err_no_resv: 1417 for_each_tile(tile, xe, id) 1418 xe_range_fence_tree_fini(&vm->rftree[id]); 1419 kfree(vm); 1420 if (!(flags & XE_VM_FLAG_MIGRATION)) 1421 xe_device_mem_access_put(xe); 1422 return ERR_PTR(err); 1423 } 1424 1425 static void xe_vm_close(struct xe_vm *vm) 1426 { 1427 down_write(&vm->lock); 1428 vm->size = 0; 1429 up_write(&vm->lock); 1430 } 1431 1432 void xe_vm_close_and_put(struct xe_vm *vm) 1433 { 1434 LIST_HEAD(contested); 1435 struct xe_device *xe = vm->xe; 1436 struct xe_tile *tile; 1437 struct xe_vma *vma, *next_vma; 1438 struct drm_gpuva *gpuva, *next; 1439 u8 id; 1440 1441 xe_assert(xe, !vm->preempt.num_exec_queues); 1442 1443 xe_vm_close(vm); 1444 if (xe_vm_in_preempt_fence_mode(vm)) 1445 flush_work(&vm->preempt.rebind_work); 1446 1447 down_write(&vm->lock); 1448 for_each_tile(tile, xe, id) { 1449 if (vm->q[id]) 1450 xe_exec_queue_last_fence_put(vm->q[id], vm); 1451 } 1452 up_write(&vm->lock); 1453 1454 for_each_tile(tile, xe, id) { 1455 if (vm->q[id]) { 1456 xe_exec_queue_kill(vm->q[id]); 1457 xe_exec_queue_put(vm->q[id]); 1458 vm->q[id] = NULL; 1459 } 1460 } 1461 1462 down_write(&vm->lock); 1463 xe_vm_lock(vm, false); 1464 drm_gpuvm_for_each_va_safe(gpuva, next, &vm->gpuvm) { 1465 vma = gpuva_to_vma(gpuva); 1466 1467 if (xe_vma_has_no_bo(vma)) { 1468 down_read(&vm->userptr.notifier_lock); 1469 vma->gpuva.flags |= XE_VMA_DESTROYED; 1470 up_read(&vm->userptr.notifier_lock); 1471 } 1472 1473 xe_vm_remove_vma(vm, vma); 1474 1475 /* easy case, remove from VMA? */ 1476 if (xe_vma_has_no_bo(vma) || xe_vma_bo(vma)->vm) { 1477 list_del_init(&vma->combined_links.rebind); 1478 xe_vma_destroy(vma, NULL); 1479 continue; 1480 } 1481 1482 list_move_tail(&vma->combined_links.destroy, &contested); 1483 vma->gpuva.flags |= XE_VMA_DESTROYED; 1484 } 1485 1486 /* 1487 * All vm operations will add shared fences to resv. 1488 * The only exception is eviction for a shared object, 1489 * but even so, the unbind when evicted would still 1490 * install a fence to resv. Hence it's safe to 1491 * destroy the pagetables immediately. 
1492 */ 1493 xe_vm_free_scratch(vm); 1494 1495 for_each_tile(tile, xe, id) { 1496 if (vm->pt_root[id]) { 1497 xe_pt_destroy(vm->pt_root[id], vm->flags, NULL); 1498 vm->pt_root[id] = NULL; 1499 } 1500 } 1501 xe_vm_unlock(vm); 1502 1503 /* 1504 * VM is now dead, cannot re-add nodes to vm->vmas if it's NULL 1505 * Since we hold a refcount to the bo, we can remove and free 1506 * the members safely without locking. 1507 */ 1508 list_for_each_entry_safe(vma, next_vma, &contested, 1509 combined_links.destroy) { 1510 list_del_init(&vma->combined_links.destroy); 1511 xe_vma_destroy_unlocked(vma); 1512 } 1513 1514 up_write(&vm->lock); 1515 1516 mutex_lock(&xe->usm.lock); 1517 if (vm->flags & XE_VM_FLAG_FAULT_MODE) 1518 xe->usm.num_vm_in_fault_mode--; 1519 else if (!(vm->flags & XE_VM_FLAG_MIGRATION)) 1520 xe->usm.num_vm_in_non_fault_mode--; 1521 mutex_unlock(&xe->usm.lock); 1522 1523 for_each_tile(tile, xe, id) 1524 xe_range_fence_tree_fini(&vm->rftree[id]); 1525 1526 xe_vm_put(vm); 1527 } 1528 1529 static void vm_destroy_work_func(struct work_struct *w) 1530 { 1531 struct xe_vm *vm = 1532 container_of(w, struct xe_vm, destroy_work); 1533 struct xe_device *xe = vm->xe; 1534 struct xe_tile *tile; 1535 u8 id; 1536 void *lookup; 1537 1538 /* xe_vm_close_and_put was not called? */ 1539 xe_assert(xe, !vm->size); 1540 1541 if (!(vm->flags & XE_VM_FLAG_MIGRATION)) { 1542 xe_device_mem_access_put(xe); 1543 1544 if (xe->info.has_asid && vm->usm.asid) { 1545 mutex_lock(&xe->usm.lock); 1546 lookup = xa_erase(&xe->usm.asid_to_vm, vm->usm.asid); 1547 xe_assert(xe, lookup == vm); 1548 mutex_unlock(&xe->usm.lock); 1549 } 1550 } 1551 1552 for_each_tile(tile, xe, id) 1553 XE_WARN_ON(vm->pt_root[id]); 1554 1555 trace_xe_vm_free(vm); 1556 dma_fence_put(vm->rebind_fence); 1557 kfree(vm); 1558 } 1559 1560 static void xe_vm_free(struct drm_gpuvm *gpuvm) 1561 { 1562 struct xe_vm *vm = container_of(gpuvm, struct xe_vm, gpuvm); 1563 1564 /* To destroy the VM we need to be able to sleep */ 1565 queue_work(system_unbound_wq, &vm->destroy_work); 1566 } 1567 1568 struct xe_vm *xe_vm_lookup(struct xe_file *xef, u32 id) 1569 { 1570 struct xe_vm *vm; 1571 1572 mutex_lock(&xef->vm.lock); 1573 vm = xa_load(&xef->vm.xa, id); 1574 if (vm) 1575 xe_vm_get(vm); 1576 mutex_unlock(&xef->vm.lock); 1577 1578 return vm; 1579 } 1580 1581 u64 xe_vm_pdp4_descriptor(struct xe_vm *vm, struct xe_tile *tile) 1582 { 1583 return vm->pt_ops->pde_encode_bo(vm->pt_root[tile->id]->bo, 0, 1584 tile_to_xe(tile)->pat.idx[XE_CACHE_WB]); 1585 } 1586 1587 static struct xe_exec_queue * 1588 to_wait_exec_queue(struct xe_vm *vm, struct xe_exec_queue *q) 1589 { 1590 return q ? 
q : vm->q[0]; 1591 } 1592 1593 static struct dma_fence * 1594 xe_vm_unbind_vma(struct xe_vma *vma, struct xe_exec_queue *q, 1595 struct xe_sync_entry *syncs, u32 num_syncs, 1596 bool first_op, bool last_op) 1597 { 1598 struct xe_vm *vm = xe_vma_vm(vma); 1599 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); 1600 struct xe_tile *tile; 1601 struct dma_fence *fence = NULL; 1602 struct dma_fence **fences = NULL; 1603 struct dma_fence_array *cf = NULL; 1604 int cur_fence = 0, i; 1605 int number_tiles = hweight8(vma->tile_present); 1606 int err; 1607 u8 id; 1608 1609 trace_xe_vma_unbind(vma); 1610 1611 if (number_tiles > 1) { 1612 fences = kmalloc_array(number_tiles, sizeof(*fences), 1613 GFP_KERNEL); 1614 if (!fences) 1615 return ERR_PTR(-ENOMEM); 1616 } 1617 1618 for_each_tile(tile, vm->xe, id) { 1619 if (!(vma->tile_present & BIT(id))) 1620 goto next; 1621 1622 fence = __xe_pt_unbind_vma(tile, vma, q ? q : vm->q[id], 1623 first_op ? syncs : NULL, 1624 first_op ? num_syncs : 0); 1625 if (IS_ERR(fence)) { 1626 err = PTR_ERR(fence); 1627 goto err_fences; 1628 } 1629 1630 if (fences) 1631 fences[cur_fence++] = fence; 1632 1633 next: 1634 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) 1635 q = list_next_entry(q, multi_gt_list); 1636 } 1637 1638 if (fences) { 1639 cf = dma_fence_array_create(number_tiles, fences, 1640 vm->composite_fence_ctx, 1641 vm->composite_fence_seqno++, 1642 false); 1643 if (!cf) { 1644 --vm->composite_fence_seqno; 1645 err = -ENOMEM; 1646 goto err_fences; 1647 } 1648 } 1649 1650 fence = cf ? &cf->base : !fence ? 1651 xe_exec_queue_last_fence_get(wait_exec_queue, vm) : fence; 1652 if (last_op) { 1653 for (i = 0; i < num_syncs; i++) 1654 xe_sync_entry_signal(&syncs[i], NULL, fence); 1655 } 1656 1657 return fence; 1658 1659 err_fences: 1660 if (fences) { 1661 while (cur_fence) 1662 dma_fence_put(fences[--cur_fence]); 1663 kfree(fences); 1664 } 1665 1666 return ERR_PTR(err); 1667 } 1668 1669 static struct dma_fence * 1670 xe_vm_bind_vma(struct xe_vma *vma, struct xe_exec_queue *q, 1671 struct xe_sync_entry *syncs, u32 num_syncs, 1672 bool first_op, bool last_op) 1673 { 1674 struct xe_tile *tile; 1675 struct dma_fence *fence; 1676 struct dma_fence **fences = NULL; 1677 struct dma_fence_array *cf = NULL; 1678 struct xe_vm *vm = xe_vma_vm(vma); 1679 int cur_fence = 0, i; 1680 int number_tiles = hweight8(vma->tile_mask); 1681 int err; 1682 u8 id; 1683 1684 trace_xe_vma_bind(vma); 1685 1686 if (number_tiles > 1) { 1687 fences = kmalloc_array(number_tiles, sizeof(*fences), 1688 GFP_KERNEL); 1689 if (!fences) 1690 return ERR_PTR(-ENOMEM); 1691 } 1692 1693 for_each_tile(tile, vm->xe, id) { 1694 if (!(vma->tile_mask & BIT(id))) 1695 goto next; 1696 1697 fence = __xe_pt_bind_vma(tile, vma, q ? q : vm->q[id], 1698 first_op ? syncs : NULL, 1699 first_op ? num_syncs : 0, 1700 vma->tile_present & BIT(id)); 1701 if (IS_ERR(fence)) { 1702 err = PTR_ERR(fence); 1703 goto err_fences; 1704 } 1705 1706 if (fences) 1707 fences[cur_fence++] = fence; 1708 1709 next: 1710 if (q && vm->pt_root[id] && !list_empty(&q->multi_gt_list)) 1711 q = list_next_entry(q, multi_gt_list); 1712 } 1713 1714 if (fences) { 1715 cf = dma_fence_array_create(number_tiles, fences, 1716 vm->composite_fence_ctx, 1717 vm->composite_fence_seqno++, 1718 false); 1719 if (!cf) { 1720 --vm->composite_fence_seqno; 1721 err = -ENOMEM; 1722 goto err_fences; 1723 } 1724 } 1725 1726 if (last_op) { 1727 for (i = 0; i < num_syncs; i++) 1728 xe_sync_entry_signal(&syncs[i], NULL, 1729 cf ? 
&cf->base : fence); 1730 } 1731 1732 return cf ? &cf->base : fence; 1733 1734 err_fences: 1735 if (fences) { 1736 while (cur_fence) 1737 dma_fence_put(fences[--cur_fence]); 1738 kfree(fences); 1739 } 1740 1741 return ERR_PTR(err); 1742 } 1743 1744 static int __xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, 1745 struct xe_exec_queue *q, struct xe_sync_entry *syncs, 1746 u32 num_syncs, bool immediate, bool first_op, 1747 bool last_op) 1748 { 1749 struct dma_fence *fence; 1750 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); 1751 1752 xe_vm_assert_held(vm); 1753 1754 if (immediate) { 1755 fence = xe_vm_bind_vma(vma, q, syncs, num_syncs, first_op, 1756 last_op); 1757 if (IS_ERR(fence)) 1758 return PTR_ERR(fence); 1759 } else { 1760 int i; 1761 1762 xe_assert(vm->xe, xe_vm_in_fault_mode(vm)); 1763 1764 fence = xe_exec_queue_last_fence_get(wait_exec_queue, vm); 1765 if (last_op) { 1766 for (i = 0; i < num_syncs; i++) 1767 xe_sync_entry_signal(&syncs[i], NULL, fence); 1768 } 1769 } 1770 1771 if (last_op) 1772 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); 1773 dma_fence_put(fence); 1774 1775 return 0; 1776 } 1777 1778 static int xe_vm_bind(struct xe_vm *vm, struct xe_vma *vma, struct xe_exec_queue *q, 1779 struct xe_bo *bo, struct xe_sync_entry *syncs, 1780 u32 num_syncs, bool immediate, bool first_op, 1781 bool last_op) 1782 { 1783 int err; 1784 1785 xe_vm_assert_held(vm); 1786 xe_bo_assert_held(bo); 1787 1788 if (bo && immediate) { 1789 err = xe_bo_validate(bo, vm, true); 1790 if (err) 1791 return err; 1792 } 1793 1794 return __xe_vm_bind(vm, vma, q, syncs, num_syncs, immediate, first_op, 1795 last_op); 1796 } 1797 1798 static int xe_vm_unbind(struct xe_vm *vm, struct xe_vma *vma, 1799 struct xe_exec_queue *q, struct xe_sync_entry *syncs, 1800 u32 num_syncs, bool first_op, bool last_op) 1801 { 1802 struct dma_fence *fence; 1803 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); 1804 1805 xe_vm_assert_held(vm); 1806 xe_bo_assert_held(xe_vma_bo(vma)); 1807 1808 fence = xe_vm_unbind_vma(vma, q, syncs, num_syncs, first_op, last_op); 1809 if (IS_ERR(fence)) 1810 return PTR_ERR(fence); 1811 1812 xe_vma_destroy(vma, fence); 1813 if (last_op) 1814 xe_exec_queue_last_fence_set(wait_exec_queue, vm, fence); 1815 dma_fence_put(fence); 1816 1817 return 0; 1818 } 1819 1820 #define ALL_DRM_XE_VM_CREATE_FLAGS (DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE | \ 1821 DRM_XE_VM_CREATE_FLAG_LR_MODE | \ 1822 DRM_XE_VM_CREATE_FLAG_FAULT_MODE) 1823 1824 int xe_vm_create_ioctl(struct drm_device *dev, void *data, 1825 struct drm_file *file) 1826 { 1827 struct xe_device *xe = to_xe_device(dev); 1828 struct xe_file *xef = to_xe_file(file); 1829 struct drm_xe_vm_create *args = data; 1830 struct xe_tile *tile; 1831 struct xe_vm *vm; 1832 u32 id, asid; 1833 int err; 1834 u32 flags = 0; 1835 1836 if (XE_IOCTL_DBG(xe, args->extensions)) 1837 return -EINVAL; 1838 1839 if (XE_WA(xe_root_mmio_gt(xe), 14016763929)) 1840 args->flags |= DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE; 1841 1842 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && 1843 !xe->info.has_usm)) 1844 return -EINVAL; 1845 1846 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) 1847 return -EINVAL; 1848 1849 if (XE_IOCTL_DBG(xe, args->flags & ~ALL_DRM_XE_VM_CREATE_FLAGS)) 1850 return -EINVAL; 1851 1852 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE && 1853 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) 1854 return -EINVAL; 1855 1856 if (XE_IOCTL_DBG(xe, !(args->flags & 
DRM_XE_VM_CREATE_FLAG_LR_MODE) && 1857 args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE)) 1858 return -EINVAL; 1859 1860 if (XE_IOCTL_DBG(xe, args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE && 1861 xe_device_in_non_fault_mode(xe))) 1862 return -EINVAL; 1863 1864 if (XE_IOCTL_DBG(xe, !(args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) && 1865 xe_device_in_fault_mode(xe))) 1866 return -EINVAL; 1867 1868 if (XE_IOCTL_DBG(xe, args->extensions)) 1869 return -EINVAL; 1870 1871 if (args->flags & DRM_XE_VM_CREATE_FLAG_SCRATCH_PAGE) 1872 flags |= XE_VM_FLAG_SCRATCH_PAGE; 1873 if (args->flags & DRM_XE_VM_CREATE_FLAG_LR_MODE) 1874 flags |= XE_VM_FLAG_LR_MODE; 1875 if (args->flags & DRM_XE_VM_CREATE_FLAG_FAULT_MODE) 1876 flags |= XE_VM_FLAG_FAULT_MODE; 1877 1878 vm = xe_vm_create(xe, flags); 1879 if (IS_ERR(vm)) 1880 return PTR_ERR(vm); 1881 1882 mutex_lock(&xef->vm.lock); 1883 err = xa_alloc(&xef->vm.xa, &id, vm, xa_limit_32b, GFP_KERNEL); 1884 mutex_unlock(&xef->vm.lock); 1885 if (err) 1886 goto err_close_and_put; 1887 1888 if (xe->info.has_asid) { 1889 mutex_lock(&xe->usm.lock); 1890 err = xa_alloc_cyclic(&xe->usm.asid_to_vm, &asid, vm, 1891 XA_LIMIT(1, XE_MAX_ASID - 1), 1892 &xe->usm.next_asid, GFP_KERNEL); 1893 mutex_unlock(&xe->usm.lock); 1894 if (err < 0) 1895 goto err_free_id; 1896 1897 vm->usm.asid = asid; 1898 } 1899 1900 args->vm_id = id; 1901 vm->xef = xef; 1902 1903 /* Record BO memory for VM pagetable created against client */ 1904 for_each_tile(tile, xe, id) 1905 if (vm->pt_root[id]) 1906 xe_drm_client_add_bo(vm->xef->client, vm->pt_root[id]->bo); 1907 1908 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_MEM) 1909 /* Warning: Security issue - never enable by default */ 1910 args->reserved[0] = xe_bo_main_addr(vm->pt_root[0]->bo, XE_PAGE_SIZE); 1911 #endif 1912 1913 return 0; 1914 1915 err_free_id: 1916 mutex_lock(&xef->vm.lock); 1917 xa_erase(&xef->vm.xa, id); 1918 mutex_unlock(&xef->vm.lock); 1919 err_close_and_put: 1920 xe_vm_close_and_put(vm); 1921 1922 return err; 1923 } 1924 1925 int xe_vm_destroy_ioctl(struct drm_device *dev, void *data, 1926 struct drm_file *file) 1927 { 1928 struct xe_device *xe = to_xe_device(dev); 1929 struct xe_file *xef = to_xe_file(file); 1930 struct drm_xe_vm_destroy *args = data; 1931 struct xe_vm *vm; 1932 int err = 0; 1933 1934 if (XE_IOCTL_DBG(xe, args->pad) || 1935 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) 1936 return -EINVAL; 1937 1938 mutex_lock(&xef->vm.lock); 1939 vm = xa_load(&xef->vm.xa, args->vm_id); 1940 if (XE_IOCTL_DBG(xe, !vm)) 1941 err = -ENOENT; 1942 else if (XE_IOCTL_DBG(xe, vm->preempt.num_exec_queues)) 1943 err = -EBUSY; 1944 else 1945 xa_erase(&xef->vm.xa, args->vm_id); 1946 mutex_unlock(&xef->vm.lock); 1947 1948 if (!err) 1949 xe_vm_close_and_put(vm); 1950 1951 return err; 1952 } 1953 1954 static const u32 region_to_mem_type[] = { 1955 XE_PL_TT, 1956 XE_PL_VRAM0, 1957 XE_PL_VRAM1, 1958 }; 1959 1960 static int xe_vm_prefetch(struct xe_vm *vm, struct xe_vma *vma, 1961 struct xe_exec_queue *q, u32 region, 1962 struct xe_sync_entry *syncs, u32 num_syncs, 1963 bool first_op, bool last_op) 1964 { 1965 struct xe_exec_queue *wait_exec_queue = to_wait_exec_queue(vm, q); 1966 int err; 1967 1968 xe_assert(vm->xe, region <= ARRAY_SIZE(region_to_mem_type)); 1969 1970 if (!xe_vma_has_no_bo(vma)) { 1971 err = xe_bo_migrate(xe_vma_bo(vma), region_to_mem_type[region]); 1972 if (err) 1973 return err; 1974 } 1975 1976 if (vma->tile_mask != (vma->tile_present & ~vma->usm.tile_invalidated)) { 1977 return xe_vm_bind(vm, vma, q, xe_vma_bo(vma), syncs, 
num_syncs, 1978 true, first_op, last_op); 1979 } else { 1980 int i; 1981 1982 /* Nothing to do, signal fences now */ 1983 if (last_op) { 1984 for (i = 0; i < num_syncs; i++) { 1985 struct dma_fence *fence = 1986 xe_exec_queue_last_fence_get(wait_exec_queue, vm); 1987 1988 xe_sync_entry_signal(&syncs[i], NULL, fence); 1989 dma_fence_put(fence); 1990 } 1991 } 1992 1993 return 0; 1994 } 1995 } 1996 1997 static void prep_vma_destroy(struct xe_vm *vm, struct xe_vma *vma, 1998 bool post_commit) 1999 { 2000 down_read(&vm->userptr.notifier_lock); 2001 vma->gpuva.flags |= XE_VMA_DESTROYED; 2002 up_read(&vm->userptr.notifier_lock); 2003 if (post_commit) 2004 xe_vm_remove_vma(vm, vma); 2005 } 2006 2007 #undef ULL 2008 #define ULL unsigned long long 2009 2010 #if IS_ENABLED(CONFIG_DRM_XE_DEBUG_VM) 2011 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op) 2012 { 2013 struct xe_vma *vma; 2014 2015 switch (op->op) { 2016 case DRM_GPUVA_OP_MAP: 2017 vm_dbg(&xe->drm, "MAP: addr=0x%016llx, range=0x%016llx", 2018 (ULL)op->map.va.addr, (ULL)op->map.va.range); 2019 break; 2020 case DRM_GPUVA_OP_REMAP: 2021 vma = gpuva_to_vma(op->remap.unmap->va); 2022 vm_dbg(&xe->drm, "REMAP:UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d", 2023 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma), 2024 op->remap.unmap->keep ? 1 : 0); 2025 if (op->remap.prev) 2026 vm_dbg(&xe->drm, 2027 "REMAP:PREV: addr=0x%016llx, range=0x%016llx", 2028 (ULL)op->remap.prev->va.addr, 2029 (ULL)op->remap.prev->va.range); 2030 if (op->remap.next) 2031 vm_dbg(&xe->drm, 2032 "REMAP:NEXT: addr=0x%016llx, range=0x%016llx", 2033 (ULL)op->remap.next->va.addr, 2034 (ULL)op->remap.next->va.range); 2035 break; 2036 case DRM_GPUVA_OP_UNMAP: 2037 vma = gpuva_to_vma(op->unmap.va); 2038 vm_dbg(&xe->drm, "UNMAP: addr=0x%016llx, range=0x%016llx, keep=%d", 2039 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma), 2040 op->unmap.keep ? 1 : 0); 2041 break; 2042 case DRM_GPUVA_OP_PREFETCH: 2043 vma = gpuva_to_vma(op->prefetch.va); 2044 vm_dbg(&xe->drm, "PREFETCH: addr=0x%016llx, range=0x%016llx", 2045 (ULL)xe_vma_start(vma), (ULL)xe_vma_size(vma)); 2046 break; 2047 default: 2048 drm_warn(&xe->drm, "NOT POSSIBLE"); 2049 } 2050 } 2051 #else 2052 static void print_op(struct xe_device *xe, struct drm_gpuva_op *op) 2053 { 2054 } 2055 #endif 2056 2057 /* 2058 * Create operations list from IOCTL arguments, setup operations fields so parse 2059 * and commit steps are decoupled from IOCTL arguments. This step can fail. 2060 */ 2061 static struct drm_gpuva_ops * 2062 vm_bind_ioctl_ops_create(struct xe_vm *vm, struct xe_bo *bo, 2063 u64 bo_offset_or_userptr, u64 addr, u64 range, 2064 u32 operation, u32 flags, 2065 u32 prefetch_region, u16 pat_index) 2066 { 2067 struct drm_gem_object *obj = bo ? 
&bo->ttm.base : NULL; 2068 struct drm_gpuva_ops *ops; 2069 struct drm_gpuva_op *__op; 2070 struct drm_gpuvm_bo *vm_bo; 2071 int err; 2072 2073 lockdep_assert_held_write(&vm->lock); 2074 2075 vm_dbg(&vm->xe->drm, 2076 "op=%d, addr=0x%016llx, range=0x%016llx, bo_offset_or_userptr=0x%016llx", 2077 operation, (ULL)addr, (ULL)range, 2078 (ULL)bo_offset_or_userptr); 2079 2080 switch (operation) { 2081 case DRM_XE_VM_BIND_OP_MAP: 2082 case DRM_XE_VM_BIND_OP_MAP_USERPTR: 2083 ops = drm_gpuvm_sm_map_ops_create(&vm->gpuvm, addr, range, 2084 obj, bo_offset_or_userptr); 2085 break; 2086 case DRM_XE_VM_BIND_OP_UNMAP: 2087 ops = drm_gpuvm_sm_unmap_ops_create(&vm->gpuvm, addr, range); 2088 break; 2089 case DRM_XE_VM_BIND_OP_PREFETCH: 2090 ops = drm_gpuvm_prefetch_ops_create(&vm->gpuvm, addr, range); 2091 break; 2092 case DRM_XE_VM_BIND_OP_UNMAP_ALL: 2093 xe_assert(vm->xe, bo); 2094 2095 err = xe_bo_lock(bo, true); 2096 if (err) 2097 return ERR_PTR(err); 2098 2099 vm_bo = drm_gpuvm_bo_obtain(&vm->gpuvm, obj); 2100 if (IS_ERR(vm_bo)) { 2101 xe_bo_unlock(bo); 2102 return ERR_CAST(vm_bo); 2103 } 2104 2105 ops = drm_gpuvm_bo_unmap_ops_create(vm_bo); 2106 drm_gpuvm_bo_put(vm_bo); 2107 xe_bo_unlock(bo); 2108 break; 2109 default: 2110 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 2111 ops = ERR_PTR(-EINVAL); 2112 } 2113 if (IS_ERR(ops)) 2114 return ops; 2115 2116 drm_gpuva_for_each_op(__op, ops) { 2117 struct xe_vma_op *op = gpuva_op_to_vma_op(__op); 2118 2119 if (__op->op == DRM_GPUVA_OP_MAP) { 2120 op->map.immediate = 2121 flags & DRM_XE_VM_BIND_FLAG_IMMEDIATE; 2122 op->map.read_only = 2123 flags & DRM_XE_VM_BIND_FLAG_READONLY; 2124 op->map.is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; 2125 op->map.pat_index = pat_index; 2126 } else if (__op->op == DRM_GPUVA_OP_PREFETCH) { 2127 op->prefetch.region = prefetch_region; 2128 } 2129 2130 print_op(vm->xe, __op); 2131 } 2132 2133 return ops; 2134 } 2135 2136 static struct xe_vma *new_vma(struct xe_vm *vm, struct drm_gpuva_op_map *op, 2137 u16 pat_index, unsigned int flags) 2138 { 2139 struct xe_bo *bo = op->gem.obj ? 
			gem_to_xe_bo(op->gem.obj) : NULL;
	struct drm_exec exec;
	struct xe_vma *vma;
	int err;

	lockdep_assert_held_write(&vm->lock);

	if (bo) {
		drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
		drm_exec_until_all_locked(&exec) {
			err = 0;
			if (!bo->vm) {
				err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
				drm_exec_retry_on_contention(&exec);
			}
			if (!err) {
				err = drm_exec_lock_obj(&exec, &bo->ttm.base);
				drm_exec_retry_on_contention(&exec);
			}
			if (err) {
				drm_exec_fini(&exec);
				return ERR_PTR(err);
			}
		}
	}
	vma = xe_vma_create(vm, bo, op->gem.offset,
			    op->va.addr, op->va.addr +
			    op->va.range - 1, pat_index, flags);
	if (bo)
		drm_exec_fini(&exec);

	if (xe_vma_is_userptr(vma)) {
		err = xe_vma_userptr_pin_pages(to_userptr_vma(vma));
		if (err) {
			prep_vma_destroy(vm, vma, false);
			xe_vma_destroy_unlocked(vma);
			return ERR_PTR(err);
		}
	} else if (!xe_vma_has_no_bo(vma) && !bo->vm) {
		err = add_preempt_fences(vm, bo);
		if (err) {
			prep_vma_destroy(vm, vma, false);
			xe_vma_destroy_unlocked(vma);
			return ERR_PTR(err);
		}
	}

	return vma;
}

static u64 xe_vma_max_pte_size(struct xe_vma *vma)
{
	if (vma->gpuva.flags & XE_VMA_PTE_1G)
		return SZ_1G;
	else if (vma->gpuva.flags & XE_VMA_PTE_2M)
		return SZ_2M;
	else if (vma->gpuva.flags & XE_VMA_PTE_4K)
		return SZ_4K;

	return SZ_1G;	/* Uninitialized, use max size */
}

static u64 xe_vma_set_pte_size(struct xe_vma *vma, u64 size)
{
	switch (size) {
	case SZ_1G:
		vma->gpuva.flags |= XE_VMA_PTE_1G;
		break;
	case SZ_2M:
		vma->gpuva.flags |= XE_VMA_PTE_2M;
		break;
	}

	return SZ_4K;
}

static int xe_vma_op_commit(struct xe_vm *vm, struct xe_vma_op *op)
{
	int err = 0;

	lockdep_assert_held_write(&vm->lock);

	switch (op->base.op) {
	case DRM_GPUVA_OP_MAP:
		err |= xe_vm_insert_vma(vm, op->map.vma);
		if (!err)
			op->flags |= XE_VMA_OP_COMMITTED;
		break;
	case DRM_GPUVA_OP_REMAP:
	{
		u8 tile_present =
			gpuva_to_vma(op->base.remap.unmap->va)->tile_present;

		prep_vma_destroy(vm, gpuva_to_vma(op->base.remap.unmap->va),
				 true);
		op->flags |= XE_VMA_OP_COMMITTED;

		if (op->remap.prev) {
			err |= xe_vm_insert_vma(vm, op->remap.prev);
			if (!err)
				op->flags |= XE_VMA_OP_PREV_COMMITTED;
			if (!err && op->remap.skip_prev) {
				op->remap.prev->tile_present =
					tile_present;
				op->remap.prev = NULL;
			}
		}
		if (op->remap.next) {
			err |= xe_vm_insert_vma(vm, op->remap.next);
			if (!err)
				op->flags |= XE_VMA_OP_NEXT_COMMITTED;
			if (!err && op->remap.skip_next) {
				op->remap.next->tile_present =
					tile_present;
				op->remap.next = NULL;
			}
		}

		/* Adjust for partial unbind after removing VMA from VM */
		if (!err) {
			op->base.remap.unmap->va->va.addr = op->remap.start;
			op->base.remap.unmap->va->va.range = op->remap.range;
		}
		break;
	}
	case DRM_GPUVA_OP_UNMAP:
		prep_vma_destroy(vm, gpuva_to_vma(op->base.unmap.va), true);
		op->flags |= XE_VMA_OP_COMMITTED;
		break;
	case DRM_GPUVA_OP_PREFETCH:
		op->flags |= XE_VMA_OP_COMMITTED;
		break;
	default:
		drm_warn(&vm->xe->drm, "NOT POSSIBLE");
	}

	return err;
}

static int
vm_bind_ioctl_ops_parse(struct xe_vm *vm, struct xe_exec_queue *q, 2280 struct drm_gpuva_ops *ops, 2281 struct xe_sync_entry *syncs, u32 num_syncs, 2282 struct list_head *ops_list, bool last) 2283 { 2284 struct xe_vma_op *last_op = NULL; 2285 struct drm_gpuva_op *__op; 2286 int err = 0; 2287 2288 lockdep_assert_held_write(&vm->lock); 2289 2290 drm_gpuva_for_each_op(__op, ops) { 2291 struct xe_vma_op *op = gpuva_op_to_vma_op(__op); 2292 struct xe_vma *vma; 2293 bool first = list_empty(ops_list); 2294 unsigned int flags = 0; 2295 2296 INIT_LIST_HEAD(&op->link); 2297 list_add_tail(&op->link, ops_list); 2298 2299 if (first) { 2300 op->flags |= XE_VMA_OP_FIRST; 2301 op->num_syncs = num_syncs; 2302 op->syncs = syncs; 2303 } 2304 2305 op->q = q; 2306 2307 switch (op->base.op) { 2308 case DRM_GPUVA_OP_MAP: 2309 { 2310 flags |= op->map.read_only ? 2311 VMA_CREATE_FLAG_READ_ONLY : 0; 2312 flags |= op->map.is_null ? 2313 VMA_CREATE_FLAG_IS_NULL : 0; 2314 2315 vma = new_vma(vm, &op->base.map, op->map.pat_index, 2316 flags); 2317 if (IS_ERR(vma)) 2318 return PTR_ERR(vma); 2319 2320 op->map.vma = vma; 2321 break; 2322 } 2323 case DRM_GPUVA_OP_REMAP: 2324 { 2325 struct xe_vma *old = 2326 gpuva_to_vma(op->base.remap.unmap->va); 2327 2328 op->remap.start = xe_vma_start(old); 2329 op->remap.range = xe_vma_size(old); 2330 2331 if (op->base.remap.prev) { 2332 flags |= op->base.remap.unmap->va->flags & 2333 XE_VMA_READ_ONLY ? 2334 VMA_CREATE_FLAG_READ_ONLY : 0; 2335 flags |= op->base.remap.unmap->va->flags & 2336 DRM_GPUVA_SPARSE ? 2337 VMA_CREATE_FLAG_IS_NULL : 0; 2338 2339 vma = new_vma(vm, op->base.remap.prev, 2340 old->pat_index, flags); 2341 if (IS_ERR(vma)) 2342 return PTR_ERR(vma); 2343 2344 op->remap.prev = vma; 2345 2346 /* 2347 * Userptr creates a new SG mapping so 2348 * we must also rebind. 2349 */ 2350 op->remap.skip_prev = !xe_vma_is_userptr(old) && 2351 IS_ALIGNED(xe_vma_end(vma), 2352 xe_vma_max_pte_size(old)); 2353 if (op->remap.skip_prev) { 2354 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old)); 2355 op->remap.range -= 2356 xe_vma_end(vma) - 2357 xe_vma_start(old); 2358 op->remap.start = xe_vma_end(vma); 2359 } 2360 } 2361 2362 if (op->base.remap.next) { 2363 flags |= op->base.remap.unmap->va->flags & 2364 XE_VMA_READ_ONLY ? 2365 VMA_CREATE_FLAG_READ_ONLY : 0; 2366 flags |= op->base.remap.unmap->va->flags & 2367 DRM_GPUVA_SPARSE ? 2368 VMA_CREATE_FLAG_IS_NULL : 0; 2369 2370 vma = new_vma(vm, op->base.remap.next, 2371 old->pat_index, flags); 2372 if (IS_ERR(vma)) 2373 return PTR_ERR(vma); 2374 2375 op->remap.next = vma; 2376 2377 /* 2378 * Userptr creates a new SG mapping so 2379 * we must also rebind. 
2380 */ 2381 op->remap.skip_next = !xe_vma_is_userptr(old) && 2382 IS_ALIGNED(xe_vma_start(vma), 2383 xe_vma_max_pte_size(old)); 2384 if (op->remap.skip_next) { 2385 xe_vma_set_pte_size(vma, xe_vma_max_pte_size(old)); 2386 op->remap.range -= 2387 xe_vma_end(old) - 2388 xe_vma_start(vma); 2389 } 2390 } 2391 break; 2392 } 2393 case DRM_GPUVA_OP_UNMAP: 2394 case DRM_GPUVA_OP_PREFETCH: 2395 /* Nothing to do */ 2396 break; 2397 default: 2398 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 2399 } 2400 2401 last_op = op; 2402 2403 err = xe_vma_op_commit(vm, op); 2404 if (err) 2405 return err; 2406 } 2407 2408 /* FIXME: Unhandled corner case */ 2409 XE_WARN_ON(!last_op && last && !list_empty(ops_list)); 2410 2411 if (!last_op) 2412 return 0; 2413 2414 last_op->ops = ops; 2415 if (last) { 2416 last_op->flags |= XE_VMA_OP_LAST; 2417 last_op->num_syncs = num_syncs; 2418 last_op->syncs = syncs; 2419 } 2420 2421 return 0; 2422 } 2423 2424 static int op_execute(struct drm_exec *exec, struct xe_vm *vm, 2425 struct xe_vma *vma, struct xe_vma_op *op) 2426 { 2427 int err; 2428 2429 lockdep_assert_held_write(&vm->lock); 2430 2431 err = xe_vm_prepare_vma(exec, vma, 1); 2432 if (err) 2433 return err; 2434 2435 xe_vm_assert_held(vm); 2436 xe_bo_assert_held(xe_vma_bo(vma)); 2437 2438 switch (op->base.op) { 2439 case DRM_GPUVA_OP_MAP: 2440 err = xe_vm_bind(vm, vma, op->q, xe_vma_bo(vma), 2441 op->syncs, op->num_syncs, 2442 op->map.immediate || !xe_vm_in_fault_mode(vm), 2443 op->flags & XE_VMA_OP_FIRST, 2444 op->flags & XE_VMA_OP_LAST); 2445 break; 2446 case DRM_GPUVA_OP_REMAP: 2447 { 2448 bool prev = !!op->remap.prev; 2449 bool next = !!op->remap.next; 2450 2451 if (!op->remap.unmap_done) { 2452 if (prev || next) 2453 vma->gpuva.flags |= XE_VMA_FIRST_REBIND; 2454 err = xe_vm_unbind(vm, vma, op->q, op->syncs, 2455 op->num_syncs, 2456 op->flags & XE_VMA_OP_FIRST, 2457 op->flags & XE_VMA_OP_LAST && 2458 !prev && !next); 2459 if (err) 2460 break; 2461 op->remap.unmap_done = true; 2462 } 2463 2464 if (prev) { 2465 op->remap.prev->gpuva.flags |= XE_VMA_LAST_REBIND; 2466 err = xe_vm_bind(vm, op->remap.prev, op->q, 2467 xe_vma_bo(op->remap.prev), op->syncs, 2468 op->num_syncs, true, false, 2469 op->flags & XE_VMA_OP_LAST && !next); 2470 op->remap.prev->gpuva.flags &= ~XE_VMA_LAST_REBIND; 2471 if (err) 2472 break; 2473 op->remap.prev = NULL; 2474 } 2475 2476 if (next) { 2477 op->remap.next->gpuva.flags |= XE_VMA_LAST_REBIND; 2478 err = xe_vm_bind(vm, op->remap.next, op->q, 2479 xe_vma_bo(op->remap.next), 2480 op->syncs, op->num_syncs, 2481 true, false, 2482 op->flags & XE_VMA_OP_LAST); 2483 op->remap.next->gpuva.flags &= ~XE_VMA_LAST_REBIND; 2484 if (err) 2485 break; 2486 op->remap.next = NULL; 2487 } 2488 2489 break; 2490 } 2491 case DRM_GPUVA_OP_UNMAP: 2492 err = xe_vm_unbind(vm, vma, op->q, op->syncs, 2493 op->num_syncs, op->flags & XE_VMA_OP_FIRST, 2494 op->flags & XE_VMA_OP_LAST); 2495 break; 2496 case DRM_GPUVA_OP_PREFETCH: 2497 err = xe_vm_prefetch(vm, vma, op->q, op->prefetch.region, 2498 op->syncs, op->num_syncs, 2499 op->flags & XE_VMA_OP_FIRST, 2500 op->flags & XE_VMA_OP_LAST); 2501 break; 2502 default: 2503 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 2504 } 2505 2506 if (err) 2507 trace_xe_vma_fail(vma); 2508 2509 return err; 2510 } 2511 2512 static int __xe_vma_op_execute(struct xe_vm *vm, struct xe_vma *vma, 2513 struct xe_vma_op *op) 2514 { 2515 struct drm_exec exec; 2516 int err; 2517 2518 retry_userptr: 2519 drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); 2520 drm_exec_until_all_locked(&exec) { 2521 err = 
op_execute(&exec, vm, vma, op); 2522 drm_exec_retry_on_contention(&exec); 2523 if (err) 2524 break; 2525 } 2526 drm_exec_fini(&exec); 2527 2528 if (err == -EAGAIN) { 2529 lockdep_assert_held_write(&vm->lock); 2530 2531 if (op->base.op == DRM_GPUVA_OP_REMAP) { 2532 if (!op->remap.unmap_done) 2533 vma = gpuva_to_vma(op->base.remap.unmap->va); 2534 else if (op->remap.prev) 2535 vma = op->remap.prev; 2536 else 2537 vma = op->remap.next; 2538 } 2539 2540 if (xe_vma_is_userptr(vma)) { 2541 err = xe_vma_userptr_pin_pages(to_userptr_vma(vma)); 2542 if (!err) 2543 goto retry_userptr; 2544 2545 trace_xe_vma_fail(vma); 2546 } 2547 } 2548 2549 return err; 2550 } 2551 2552 static int xe_vma_op_execute(struct xe_vm *vm, struct xe_vma_op *op) 2553 { 2554 int ret = 0; 2555 2556 lockdep_assert_held_write(&vm->lock); 2557 2558 switch (op->base.op) { 2559 case DRM_GPUVA_OP_MAP: 2560 ret = __xe_vma_op_execute(vm, op->map.vma, op); 2561 break; 2562 case DRM_GPUVA_OP_REMAP: 2563 { 2564 struct xe_vma *vma; 2565 2566 if (!op->remap.unmap_done) 2567 vma = gpuva_to_vma(op->base.remap.unmap->va); 2568 else if (op->remap.prev) 2569 vma = op->remap.prev; 2570 else 2571 vma = op->remap.next; 2572 2573 ret = __xe_vma_op_execute(vm, vma, op); 2574 break; 2575 } 2576 case DRM_GPUVA_OP_UNMAP: 2577 ret = __xe_vma_op_execute(vm, gpuva_to_vma(op->base.unmap.va), 2578 op); 2579 break; 2580 case DRM_GPUVA_OP_PREFETCH: 2581 ret = __xe_vma_op_execute(vm, 2582 gpuva_to_vma(op->base.prefetch.va), 2583 op); 2584 break; 2585 default: 2586 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 2587 } 2588 2589 return ret; 2590 } 2591 2592 static void xe_vma_op_cleanup(struct xe_vm *vm, struct xe_vma_op *op) 2593 { 2594 bool last = op->flags & XE_VMA_OP_LAST; 2595 2596 if (last) { 2597 while (op->num_syncs--) 2598 xe_sync_entry_cleanup(&op->syncs[op->num_syncs]); 2599 kfree(op->syncs); 2600 if (op->q) 2601 xe_exec_queue_put(op->q); 2602 } 2603 if (!list_empty(&op->link)) 2604 list_del(&op->link); 2605 if (op->ops) 2606 drm_gpuva_ops_free(&vm->gpuvm, op->ops); 2607 if (last) 2608 xe_vm_put(vm); 2609 } 2610 2611 static void xe_vma_op_unwind(struct xe_vm *vm, struct xe_vma_op *op, 2612 bool post_commit, bool prev_post_commit, 2613 bool next_post_commit) 2614 { 2615 lockdep_assert_held_write(&vm->lock); 2616 2617 switch (op->base.op) { 2618 case DRM_GPUVA_OP_MAP: 2619 if (op->map.vma) { 2620 prep_vma_destroy(vm, op->map.vma, post_commit); 2621 xe_vma_destroy_unlocked(op->map.vma); 2622 } 2623 break; 2624 case DRM_GPUVA_OP_UNMAP: 2625 { 2626 struct xe_vma *vma = gpuva_to_vma(op->base.unmap.va); 2627 2628 if (vma) { 2629 down_read(&vm->userptr.notifier_lock); 2630 vma->gpuva.flags &= ~XE_VMA_DESTROYED; 2631 up_read(&vm->userptr.notifier_lock); 2632 if (post_commit) 2633 xe_vm_insert_vma(vm, vma); 2634 } 2635 break; 2636 } 2637 case DRM_GPUVA_OP_REMAP: 2638 { 2639 struct xe_vma *vma = gpuva_to_vma(op->base.remap.unmap->va); 2640 2641 if (op->remap.prev) { 2642 prep_vma_destroy(vm, op->remap.prev, prev_post_commit); 2643 xe_vma_destroy_unlocked(op->remap.prev); 2644 } 2645 if (op->remap.next) { 2646 prep_vma_destroy(vm, op->remap.next, next_post_commit); 2647 xe_vma_destroy_unlocked(op->remap.next); 2648 } 2649 if (vma) { 2650 down_read(&vm->userptr.notifier_lock); 2651 vma->gpuva.flags &= ~XE_VMA_DESTROYED; 2652 up_read(&vm->userptr.notifier_lock); 2653 if (post_commit) 2654 xe_vm_insert_vma(vm, vma); 2655 } 2656 break; 2657 } 2658 case DRM_GPUVA_OP_PREFETCH: 2659 /* Nothing to do */ 2660 break; 2661 default: 2662 drm_warn(&vm->xe->drm, "NOT POSSIBLE"); 
2663 } 2664 } 2665 2666 static void vm_bind_ioctl_ops_unwind(struct xe_vm *vm, 2667 struct drm_gpuva_ops **ops, 2668 int num_ops_list) 2669 { 2670 int i; 2671 2672 for (i = num_ops_list - 1; i >= 0; --i) { 2673 struct drm_gpuva_ops *__ops = ops[i]; 2674 struct drm_gpuva_op *__op; 2675 2676 if (!__ops) 2677 continue; 2678 2679 drm_gpuva_for_each_op_reverse(__op, __ops) { 2680 struct xe_vma_op *op = gpuva_op_to_vma_op(__op); 2681 2682 xe_vma_op_unwind(vm, op, 2683 op->flags & XE_VMA_OP_COMMITTED, 2684 op->flags & XE_VMA_OP_PREV_COMMITTED, 2685 op->flags & XE_VMA_OP_NEXT_COMMITTED); 2686 } 2687 2688 drm_gpuva_ops_free(&vm->gpuvm, __ops); 2689 } 2690 } 2691 2692 static int vm_bind_ioctl_ops_execute(struct xe_vm *vm, 2693 struct list_head *ops_list) 2694 { 2695 struct xe_vma_op *op, *next; 2696 int err; 2697 2698 lockdep_assert_held_write(&vm->lock); 2699 2700 list_for_each_entry_safe(op, next, ops_list, link) { 2701 err = xe_vma_op_execute(vm, op); 2702 if (err) { 2703 drm_warn(&vm->xe->drm, "VM op(%d) failed with %d", 2704 op->base.op, err); 2705 /* 2706 * FIXME: Killing VM rather than proper error handling 2707 */ 2708 xe_vm_kill(vm); 2709 return -ENOSPC; 2710 } 2711 xe_vma_op_cleanup(vm, op); 2712 } 2713 2714 return 0; 2715 } 2716 2717 #define SUPPORTED_FLAGS \ 2718 (DRM_XE_VM_BIND_FLAG_READONLY | \ 2719 DRM_XE_VM_BIND_FLAG_IMMEDIATE | DRM_XE_VM_BIND_FLAG_NULL) 2720 #define XE_64K_PAGE_MASK 0xffffull 2721 #define ALL_DRM_XE_SYNCS_FLAGS (DRM_XE_SYNCS_FLAG_WAIT_FOR_OP) 2722 2723 #define MAX_BINDS 512 /* FIXME: Picking random upper limit */ 2724 2725 static int vm_bind_ioctl_check_args(struct xe_device *xe, 2726 struct drm_xe_vm_bind *args, 2727 struct drm_xe_vm_bind_op **bind_ops) 2728 { 2729 int err; 2730 int i; 2731 2732 if (XE_IOCTL_DBG(xe, args->pad || args->pad2) || 2733 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) 2734 return -EINVAL; 2735 2736 if (XE_IOCTL_DBG(xe, args->extensions) || 2737 XE_IOCTL_DBG(xe, args->num_binds > MAX_BINDS)) 2738 return -EINVAL; 2739 2740 if (args->num_binds > 1) { 2741 u64 __user *bind_user = 2742 u64_to_user_ptr(args->vector_of_binds); 2743 2744 *bind_ops = kmalloc(sizeof(struct drm_xe_vm_bind_op) * 2745 args->num_binds, GFP_KERNEL); 2746 if (!*bind_ops) 2747 return -ENOMEM; 2748 2749 err = __copy_from_user(*bind_ops, bind_user, 2750 sizeof(struct drm_xe_vm_bind_op) * 2751 args->num_binds); 2752 if (XE_IOCTL_DBG(xe, err)) { 2753 err = -EFAULT; 2754 goto free_bind_ops; 2755 } 2756 } else { 2757 *bind_ops = &args->bind; 2758 } 2759 2760 for (i = 0; i < args->num_binds; ++i) { 2761 u64 range = (*bind_ops)[i].range; 2762 u64 addr = (*bind_ops)[i].addr; 2763 u32 op = (*bind_ops)[i].op; 2764 u32 flags = (*bind_ops)[i].flags; 2765 u32 obj = (*bind_ops)[i].obj; 2766 u64 obj_offset = (*bind_ops)[i].obj_offset; 2767 u32 prefetch_region = (*bind_ops)[i].prefetch_mem_region_instance; 2768 bool is_null = flags & DRM_XE_VM_BIND_FLAG_NULL; 2769 u16 pat_index = (*bind_ops)[i].pat_index; 2770 u16 coh_mode; 2771 2772 if (XE_IOCTL_DBG(xe, pat_index >= xe->pat.n_entries)) { 2773 err = -EINVAL; 2774 goto free_bind_ops; 2775 } 2776 2777 pat_index = array_index_nospec(pat_index, xe->pat.n_entries); 2778 (*bind_ops)[i].pat_index = pat_index; 2779 coh_mode = xe_pat_index_get_coh_mode(xe, pat_index); 2780 if (XE_IOCTL_DBG(xe, !coh_mode)) { /* hw reserved */ 2781 err = -EINVAL; 2782 goto free_bind_ops; 2783 } 2784 2785 if (XE_WARN_ON(coh_mode > XE_COH_AT_LEAST_1WAY)) { 2786 err = -EINVAL; 2787 goto free_bind_ops; 2788 } 2789 2790 if (XE_IOCTL_DBG(xe, op > 
DRM_XE_VM_BIND_OP_PREFETCH) || 2791 XE_IOCTL_DBG(xe, flags & ~SUPPORTED_FLAGS) || 2792 XE_IOCTL_DBG(xe, obj && is_null) || 2793 XE_IOCTL_DBG(xe, obj_offset && is_null) || 2794 XE_IOCTL_DBG(xe, op != DRM_XE_VM_BIND_OP_MAP && 2795 is_null) || 2796 XE_IOCTL_DBG(xe, !obj && 2797 op == DRM_XE_VM_BIND_OP_MAP && 2798 !is_null) || 2799 XE_IOCTL_DBG(xe, !obj && 2800 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) || 2801 XE_IOCTL_DBG(xe, addr && 2802 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) || 2803 XE_IOCTL_DBG(xe, range && 2804 op == DRM_XE_VM_BIND_OP_UNMAP_ALL) || 2805 XE_IOCTL_DBG(xe, obj && 2806 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || 2807 XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE && 2808 op == DRM_XE_VM_BIND_OP_MAP_USERPTR) || 2809 XE_IOCTL_DBG(xe, obj && 2810 op == DRM_XE_VM_BIND_OP_PREFETCH) || 2811 XE_IOCTL_DBG(xe, prefetch_region && 2812 op != DRM_XE_VM_BIND_OP_PREFETCH) || 2813 XE_IOCTL_DBG(xe, !(BIT(prefetch_region) & 2814 xe->info.mem_region_mask)) || 2815 XE_IOCTL_DBG(xe, obj && 2816 op == DRM_XE_VM_BIND_OP_UNMAP)) { 2817 err = -EINVAL; 2818 goto free_bind_ops; 2819 } 2820 2821 if (XE_IOCTL_DBG(xe, obj_offset & ~PAGE_MASK) || 2822 XE_IOCTL_DBG(xe, addr & ~PAGE_MASK) || 2823 XE_IOCTL_DBG(xe, range & ~PAGE_MASK) || 2824 XE_IOCTL_DBG(xe, !range && 2825 op != DRM_XE_VM_BIND_OP_UNMAP_ALL)) { 2826 err = -EINVAL; 2827 goto free_bind_ops; 2828 } 2829 } 2830 2831 return 0; 2832 2833 free_bind_ops: 2834 if (args->num_binds > 1) 2835 kfree(*bind_ops); 2836 return err; 2837 } 2838 2839 static int vm_bind_ioctl_signal_fences(struct xe_vm *vm, 2840 struct xe_exec_queue *q, 2841 struct xe_sync_entry *syncs, 2842 int num_syncs) 2843 { 2844 struct dma_fence *fence; 2845 int i, err = 0; 2846 2847 fence = xe_sync_in_fence_get(syncs, num_syncs, 2848 to_wait_exec_queue(vm, q), vm); 2849 if (IS_ERR(fence)) 2850 return PTR_ERR(fence); 2851 2852 for (i = 0; i < num_syncs; i++) 2853 xe_sync_entry_signal(&syncs[i], NULL, fence); 2854 2855 xe_exec_queue_last_fence_set(to_wait_exec_queue(vm, q), vm, 2856 fence); 2857 dma_fence_put(fence); 2858 2859 return err; 2860 } 2861 2862 int xe_vm_bind_ioctl(struct drm_device *dev, void *data, struct drm_file *file) 2863 { 2864 struct xe_device *xe = to_xe_device(dev); 2865 struct xe_file *xef = to_xe_file(file); 2866 struct drm_xe_vm_bind *args = data; 2867 struct drm_xe_sync __user *syncs_user; 2868 struct xe_bo **bos = NULL; 2869 struct drm_gpuva_ops **ops = NULL; 2870 struct xe_vm *vm; 2871 struct xe_exec_queue *q = NULL; 2872 u32 num_syncs, num_ufence = 0; 2873 struct xe_sync_entry *syncs = NULL; 2874 struct drm_xe_vm_bind_op *bind_ops; 2875 LIST_HEAD(ops_list); 2876 int err; 2877 int i; 2878 2879 err = vm_bind_ioctl_check_args(xe, args, &bind_ops); 2880 if (err) 2881 return err; 2882 2883 if (args->exec_queue_id) { 2884 q = xe_exec_queue_lookup(xef, args->exec_queue_id); 2885 if (XE_IOCTL_DBG(xe, !q)) { 2886 err = -ENOENT; 2887 goto free_objs; 2888 } 2889 2890 if (XE_IOCTL_DBG(xe, !(q->flags & EXEC_QUEUE_FLAG_VM))) { 2891 err = -EINVAL; 2892 goto put_exec_queue; 2893 } 2894 } 2895 2896 vm = xe_vm_lookup(xef, args->vm_id); 2897 if (XE_IOCTL_DBG(xe, !vm)) { 2898 err = -EINVAL; 2899 goto put_exec_queue; 2900 } 2901 2902 err = down_write_killable(&vm->lock); 2903 if (err) 2904 goto put_vm; 2905 2906 if (XE_IOCTL_DBG(xe, xe_vm_is_closed_or_banned(vm))) { 2907 err = -ENOENT; 2908 goto release_vm_lock; 2909 } 2910 2911 for (i = 0; i < args->num_binds; ++i) { 2912 u64 range = bind_ops[i].range; 2913 u64 addr = bind_ops[i].addr; 2914 2915 if (XE_IOCTL_DBG(xe, range > vm->size) || 2916 
		    XE_IOCTL_DBG(xe, addr > vm->size - range)) {
			err = -EINVAL;
			goto release_vm_lock;
		}
	}

	if (args->num_binds) {
		bos = kcalloc(args->num_binds, sizeof(*bos), GFP_KERNEL);
		if (!bos) {
			err = -ENOMEM;
			goto release_vm_lock;
		}

		ops = kcalloc(args->num_binds, sizeof(*ops), GFP_KERNEL);
		if (!ops) {
			err = -ENOMEM;
			goto release_vm_lock;
		}
	}

	for (i = 0; i < args->num_binds; ++i) {
		struct drm_gem_object *gem_obj;
		u64 range = bind_ops[i].range;
		u64 addr = bind_ops[i].addr;
		u32 obj = bind_ops[i].obj;
		u64 obj_offset = bind_ops[i].obj_offset;
		u16 pat_index = bind_ops[i].pat_index;
		u16 coh_mode;

		if (!obj)
			continue;

		gem_obj = drm_gem_object_lookup(file, obj);
		if (XE_IOCTL_DBG(xe, !gem_obj)) {
			err = -ENOENT;
			goto put_obj;
		}
		bos[i] = gem_to_xe_bo(gem_obj);

		if (XE_IOCTL_DBG(xe, range > bos[i]->size) ||
		    XE_IOCTL_DBG(xe, obj_offset >
				 bos[i]->size - range)) {
			err = -EINVAL;
			goto put_obj;
		}

		if (bos[i]->flags & XE_BO_INTERNAL_64K) {
			if (XE_IOCTL_DBG(xe, obj_offset &
					 XE_64K_PAGE_MASK) ||
			    XE_IOCTL_DBG(xe, addr & XE_64K_PAGE_MASK) ||
			    XE_IOCTL_DBG(xe, range & XE_64K_PAGE_MASK)) {
				err = -EINVAL;
				goto put_obj;
			}
		}

		coh_mode = xe_pat_index_get_coh_mode(xe, pat_index);
		if (bos[i]->cpu_caching) {
			if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE &&
					 bos[i]->cpu_caching == DRM_XE_GEM_CPU_CACHING_WB)) {
				err = -EINVAL;
				goto put_obj;
			}
		} else if (XE_IOCTL_DBG(xe, coh_mode == XE_COH_NONE)) {
			/*
			 * An imported dma-buf from a different device should
			 * require 1-way or 2-way coherency since we don't know
			 * how it was mapped on the CPU. Just assume it is
			 * potentially cached on the CPU side.
			 */
			err = -EINVAL;
			goto put_obj;
		}
	}

	if (args->num_syncs) {
		syncs = kcalloc(args->num_syncs, sizeof(*syncs), GFP_KERNEL);
		if (!syncs) {
			err = -ENOMEM;
			goto put_obj;
		}
	}

	syncs_user = u64_to_user_ptr(args->syncs);
	for (num_syncs = 0; num_syncs < args->num_syncs; num_syncs++) {
		err = xe_sync_entry_parse(xe, xef, &syncs[num_syncs],
					  &syncs_user[num_syncs],
					  (xe_vm_in_lr_mode(vm) ?
					   SYNC_PARSE_FLAG_LR_MODE : 0) |
					  (!args->num_binds ?
					   SYNC_PARSE_FLAG_DISALLOW_USER_FENCE : 0));
		if (err)
			goto free_syncs;

		if (xe_sync_is_ufence(&syncs[num_syncs]))
			num_ufence++;
	}

	if (XE_IOCTL_DBG(xe, num_ufence > 1)) {
		err = -EINVAL;
		goto free_syncs;
	}

	if (!args->num_binds) {
		err = -ENODATA;
		goto free_syncs;
	}

	for (i = 0; i < args->num_binds; ++i) {
		u64 range = bind_ops[i].range;
		u64 addr = bind_ops[i].addr;
		u32 op = bind_ops[i].op;
		u32 flags = bind_ops[i].flags;
		u64 obj_offset = bind_ops[i].obj_offset;
		u32 prefetch_region = bind_ops[i].prefetch_mem_region_instance;
		u16 pat_index = bind_ops[i].pat_index;

		ops[i] = vm_bind_ioctl_ops_create(vm, bos[i], obj_offset,
						  addr, range, op, flags,
						  prefetch_region, pat_index);
		if (IS_ERR(ops[i])) {
			err = PTR_ERR(ops[i]);
			ops[i] = NULL;
			goto unwind_ops;
		}

		err = vm_bind_ioctl_ops_parse(vm, q, ops[i], syncs, num_syncs,
					      &ops_list,
					      i == args->num_binds - 1);
		if (err)
			goto unwind_ops;
	}

	/* Nothing to do */
	if (list_empty(&ops_list)) {
		err = -ENODATA;
		goto unwind_ops;
	}

	xe_vm_get(vm);
	if (q)
		xe_exec_queue_get(q);

	err = vm_bind_ioctl_ops_execute(vm, &ops_list);

	up_write(&vm->lock);

	if (q)
		xe_exec_queue_put(q);
	xe_vm_put(vm);

	for (i = 0; bos && i < args->num_binds; ++i)
		xe_bo_put(bos[i]);

	kfree(bos);
	kfree(ops);
	if (args->num_binds > 1)
		kfree(bind_ops);

	return err;

unwind_ops:
	vm_bind_ioctl_ops_unwind(vm, ops, args->num_binds);
free_syncs:
	if (err == -ENODATA)
		err = vm_bind_ioctl_signal_fences(vm, q, syncs, num_syncs);
	while (num_syncs--)
		xe_sync_entry_cleanup(&syncs[num_syncs]);

	kfree(syncs);
put_obj:
	for (i = 0; i < args->num_binds; ++i)
		xe_bo_put(bos[i]);
release_vm_lock:
	up_write(&vm->lock);
put_vm:
	xe_vm_put(vm);
put_exec_queue:
	if (q)
		xe_exec_queue_put(q);
free_objs:
	kfree(bos);
	kfree(ops);
	if (args->num_binds > 1)
		kfree(bind_ops);
	return err;
}

/**
 * xe_vm_lock() - Lock the vm's dma_resv object
 * @vm: The struct xe_vm whose lock is to be locked
 * @intr: Whether waits should be interruptible
 *
 * Return: 0 on success, -EINTR if @intr is true and the wait for a
 * contended lock was interrupted. If @intr is false, the function
 * always returns 0.
 */
int xe_vm_lock(struct xe_vm *vm, bool intr)
{
	if (intr)
		return dma_resv_lock_interruptible(xe_vm_resv(vm), NULL);

	return dma_resv_lock(xe_vm_resv(vm), NULL);
}

/**
 * xe_vm_unlock() - Unlock the vm's dma_resv object
 * @vm: The struct xe_vm whose lock is to be released.
 *
 * Unlock the vm's dma_resv object that was previously locked with
 * xe_vm_lock().
 */
void xe_vm_unlock(struct xe_vm *vm)
{
	dma_resv_unlock(xe_vm_resv(vm));
}

/**
 * xe_vm_invalidate_vma - invalidate GPU mappings for VMA without a lock
 * @vma: VMA to invalidate
 *
 * Walks the page-table leaves, zeroing the entries owned by this VMA, then
 * invalidates the TLBs and blocks until the TLB invalidation is complete.
 *
 * Return: 0 on success, negative error code otherwise.
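 *
 * Note: as the assertions at the top of the function document, this is only
 * expected to be called on a VM in fault mode and never on a NULL-binding
 * VMA; for userptr VMAs the MMU notifier is expected to have already flagged
 * the range for retry (see the CONFIG_PROVE_LOCKING checks in the body).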
3141 */ 3142 int xe_vm_invalidate_vma(struct xe_vma *vma) 3143 { 3144 struct xe_device *xe = xe_vma_vm(vma)->xe; 3145 struct xe_tile *tile; 3146 u32 tile_needs_invalidate = 0; 3147 int seqno[XE_MAX_TILES_PER_DEVICE]; 3148 u8 id; 3149 int ret; 3150 3151 xe_assert(xe, xe_vm_in_fault_mode(xe_vma_vm(vma))); 3152 xe_assert(xe, !xe_vma_is_null(vma)); 3153 trace_xe_vma_usm_invalidate(vma); 3154 3155 /* Check that we don't race with page-table updates */ 3156 if (IS_ENABLED(CONFIG_PROVE_LOCKING)) { 3157 if (xe_vma_is_userptr(vma)) { 3158 WARN_ON_ONCE(!mmu_interval_check_retry 3159 (&to_userptr_vma(vma)->userptr.notifier, 3160 to_userptr_vma(vma)->userptr.notifier_seq)); 3161 WARN_ON_ONCE(!dma_resv_test_signaled(xe_vm_resv(xe_vma_vm(vma)), 3162 DMA_RESV_USAGE_BOOKKEEP)); 3163 3164 } else { 3165 xe_bo_assert_held(xe_vma_bo(vma)); 3166 } 3167 } 3168 3169 for_each_tile(tile, xe, id) { 3170 if (xe_pt_zap_ptes(tile, vma)) { 3171 tile_needs_invalidate |= BIT(id); 3172 xe_device_wmb(xe); 3173 /* 3174 * FIXME: We potentially need to invalidate multiple 3175 * GTs within the tile 3176 */ 3177 seqno[id] = xe_gt_tlb_invalidation_vma(tile->primary_gt, NULL, vma); 3178 if (seqno[id] < 0) 3179 return seqno[id]; 3180 } 3181 } 3182 3183 for_each_tile(tile, xe, id) { 3184 if (tile_needs_invalidate & BIT(id)) { 3185 ret = xe_gt_tlb_invalidation_wait(tile->primary_gt, seqno[id]); 3186 if (ret < 0) 3187 return ret; 3188 } 3189 } 3190 3191 vma->usm.tile_invalidated = vma->tile_mask; 3192 3193 return 0; 3194 } 3195 3196 int xe_analyze_vm(struct drm_printer *p, struct xe_vm *vm, int gt_id) 3197 { 3198 struct drm_gpuva *gpuva; 3199 bool is_vram; 3200 uint64_t addr; 3201 3202 if (!down_read_trylock(&vm->lock)) { 3203 drm_printf(p, " Failed to acquire VM lock to dump capture"); 3204 return 0; 3205 } 3206 if (vm->pt_root[gt_id]) { 3207 addr = xe_bo_addr(vm->pt_root[gt_id]->bo, 0, XE_PAGE_SIZE); 3208 is_vram = xe_bo_is_vram(vm->pt_root[gt_id]->bo); 3209 drm_printf(p, " VM root: A:0x%llx %s\n", addr, 3210 is_vram ? "VRAM" : "SYS"); 3211 } 3212 3213 drm_gpuvm_for_each_va(gpuva, &vm->gpuvm) { 3214 struct xe_vma *vma = gpuva_to_vma(gpuva); 3215 bool is_userptr = xe_vma_is_userptr(vma); 3216 bool is_null = xe_vma_is_null(vma); 3217 3218 if (is_null) { 3219 addr = 0; 3220 } else if (is_userptr) { 3221 struct sg_table *sg = to_userptr_vma(vma)->userptr.sg; 3222 struct xe_res_cursor cur; 3223 3224 if (sg) { 3225 xe_res_first_sg(sg, 0, XE_PAGE_SIZE, &cur); 3226 addr = xe_res_dma(&cur); 3227 } else { 3228 addr = 0; 3229 } 3230 } else { 3231 addr = __xe_bo_addr(xe_vma_bo(vma), 0, XE_PAGE_SIZE); 3232 is_vram = xe_bo_is_vram(xe_vma_bo(vma)); 3233 } 3234 drm_printf(p, " [%016llx-%016llx] S:0x%016llx A:%016llx %s\n", 3235 xe_vma_start(vma), xe_vma_end(vma) - 1, 3236 xe_vma_size(vma), 3237 addr, is_null ? "NULL" : is_userptr ? "USR" : 3238 is_vram ? "VRAM" : "SYS"); 3239 } 3240 up_read(&vm->lock); 3241 3242 return 0; 3243 } 3244
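
/*
 * Editorial worked example (hypothetical addresses) of the skip_prev/skip_next
 * logic in vm_bind_ioctl_ops_parse() above: take an old, non-userptr VMA
 * covering [0x000000, 0x400000) that was bound with 2M page-table entries
 * (XE_VMA_PTE_2M, so xe_vma_max_pte_size(old) == SZ_2M), and an unmap of
 * [0x200000, 0x300000) that splits it.
 *
 * - prev = [0x000000, 0x200000): xe_vma_end(prev) == 0x200000 is 2M aligned,
 *   so skip_prev is true. prev keeps its existing 2M entries (no rebind) and
 *   the remaining unmap shrinks to start = 0x200000, range = 0x200000.
 *
 * - next = [0x300000, 0x400000): xe_vma_start(next) == 0x300000 is not 2M
 *   aligned, so skip_next is false and next must be rebound with smaller
 *   page-table entries.
 *
 * Userptr VMAs never take the skip path because repinning creates a new SG
 * mapping, as the in-line comments in the REMAP case note.
 */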
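
/*
 * Illustrative sketch (not wired up anywhere) of the drm_exec locking pattern
 * used by new_vma() and __xe_vma_op_execute() above: the body of
 * drm_exec_until_all_locked() is re-run whenever drm_exec_retry_on_contention()
 * sees a contended lock, after the locks taken so far have been dropped. The
 * function name below is hypothetical and exists only for this example; a real
 * caller would do its work before drm_exec_fini(), which releases the locks.
 */
static int example_lock_vm_and_bo(struct xe_vm *vm, struct xe_bo *bo)
{
	struct drm_exec exec;
	int err = 0;

	drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0);
	drm_exec_until_all_locked(&exec) {
		/* Lock the VM's common dma_resv first... */
		err = drm_exec_lock_obj(&exec, xe_vm_obj(vm));
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;

		/* ...then the BO, restarting the whole block on contention. */
		err = drm_exec_lock_obj(&exec, &bo->ttm.base);
		drm_exec_retry_on_contention(&exec);
		if (err)
			break;
	}

	/* Work under both locks would go here. */

	drm_exec_fini(&exec);

	return err;
}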
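
/*
 * Illustrative sketch (not wired up anywhere) of the xe_vm_lock()/xe_vm_unlock()
 * helpers defined above, for a short critical section under the VM's dma_resv.
 * The function name and the work inside the critical section are hypothetical.
 */
static int example_with_vm_locked(struct xe_vm *vm)
{
	int err;

	/* Interruptible wait: may fail with -EINTR if a signal arrives. */
	err = xe_vm_lock(vm, true);
	if (err)
		return err;

	/* ...touch state protected by the VM's dma_resv here... */

	xe_vm_unlock(vm);

	return 0;
}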
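
/*
 * Illustrative userspace-style sketch (not kernel code, hence the #if 0 guard)
 * of the uAPI that vm_bind_ioctl_check_args() above validates: a single MAP
 * bind with no syncs. It assumes the libdrm drmIoctl() wrapper and the
 * DRM_IOCTL_XE_VM_BIND macro from the xe uAPI header; "fd", "vm_id",
 * "bo_handle", "gpu_addr", "size" and "pat_index" are placeholders supplied by
 * the caller. addr/range must be page aligned (64K aligned for 64K BOs), and
 * pat_index must be a valid index into the device's PAT table.
 */
#if 0
#include <xf86drm.h>
#include <drm/xe_drm.h>

static int example_vm_bind_map(int fd, uint32_t vm_id, uint32_t bo_handle,
			       uint64_t gpu_addr, uint64_t size,
			       uint16_t pat_index)
{
	struct drm_xe_vm_bind args = {
		.vm_id = vm_id,
		.num_binds = 1,
		/* With num_binds == 1 the op is passed inline in .bind. */
		.bind = {
			.obj = bo_handle,
			.obj_offset = 0,
			.addr = gpu_addr,
			.range = size,
			.op = DRM_XE_VM_BIND_OP_MAP,
			.flags = 0,
			.pat_index = pat_index,
		},
	};

	return drmIoctl(fd, DRM_IOCTL_XE_VM_BIND, &args);
}
#endif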