// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include "xe_bo.h"
#include "xe_gt_stats.h"
#include "xe_gt_tlb_invalidation.h"
#include "xe_migrate.h"
#include "xe_module.h"
#include "xe_pt.h"
#include "xe_svm.h"
#include "xe_ttm_vram_mgr.h"
#include "xe_vm.h"
#include "xe_vm_types.h"

static bool xe_svm_range_in_vram(struct xe_svm_range *range)
{
	/*
	 * Advisory only check whether the range is currently backed by VRAM
	 * memory.
	 */

	struct drm_gpusvm_range_flags flags = {
		/* Pairs with WRITE_ONCE in drm_gpusvm.c */
		.__flags = READ_ONCE(range->base.flags.__flags),
	};

	return flags.has_devmem_pages;
}

static bool xe_svm_range_has_vram_binding(struct xe_svm_range *range)
{
	/* Not reliable without notifier lock */
	return xe_svm_range_in_vram(range) && range->tile_present;
}

static struct xe_vm *gpusvm_to_vm(struct drm_gpusvm *gpusvm)
{
	return container_of(gpusvm, struct xe_vm, svm.gpusvm);
}

static struct xe_vm *range_to_vm(struct drm_gpusvm_range *r)
{
	return gpusvm_to_vm(r->gpusvm);
}

#define range_debug(r__, operation__) \
	vm_dbg(&range_to_vm(&(r__)->base)->xe->drm, \
	       "%s: asid=%u, gpusvm=%p, vram=%d,%d, seqno=%lu, " \
	       "start=0x%014lx, end=0x%014lx, size=%lu", \
	       (operation__), range_to_vm(&(r__)->base)->usm.asid, \
	       (r__)->base.gpusvm, \
	       xe_svm_range_in_vram((r__)) ? 1 : 0, \
	       xe_svm_range_has_vram_binding((r__)) ? 1 : 0, \
	       (r__)->base.notifier_seq, \
	       xe_svm_range_start((r__)), xe_svm_range_end((r__)), \
	       xe_svm_range_size((r__)))

void xe_svm_range_debug(struct xe_svm_range *range, const char *operation)
{
	range_debug(range, operation);
}

static void *xe_svm_devm_owner(struct xe_device *xe)
{
	return xe;
}

static struct drm_gpusvm_range *
xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
{
	struct xe_svm_range *range;

	range = kzalloc(sizeof(*range), GFP_KERNEL);
	if (!range)
		return NULL;

	INIT_LIST_HEAD(&range->garbage_collector_link);
	xe_vm_get(gpusvm_to_vm(gpusvm));

	return &range->base;
}

static void xe_svm_range_free(struct drm_gpusvm_range *range)
{
	xe_vm_put(range_to_vm(range));
	kfree(range);
}

static void
xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
				   const struct mmu_notifier_range *mmu_range)
{
	struct xe_device *xe = vm->xe;

	range_debug(range, "GARBAGE COLLECTOR ADD");

	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);

	spin_lock(&vm->svm.garbage_collector.lock);
	if (list_empty(&range->garbage_collector_link))
		list_add_tail(&range->garbage_collector_link,
			      &vm->svm.garbage_collector.range_list);
	spin_unlock(&vm->svm.garbage_collector.lock);

	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
		   &vm->svm.garbage_collector.work);
}
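
/*
 * MMU notifier invalidation is split into a begin/end pair per range:
 * _begin() zaps the GPU PTEs for every tile with a binding and returns a
 * mask of affected tiles (also marking them in range->tile_invalidated),
 * while _end() drops the DMA mappings and, on MMU_NOTIFY_UNMAP, queues the
 * range for the garbage collector. The TLB invalidation for the accumulated
 * tile mask is issued in between by xe_svm_invalidate().
 */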

static u8
xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
				  const struct mmu_notifier_range *mmu_range,
				  u64 *adj_start, u64 *adj_end)
{
	struct xe_svm_range *range = to_xe_range(r);
	struct xe_device *xe = vm->xe;
	struct xe_tile *tile;
	u8 tile_mask = 0;
	u8 id;

	xe_svm_assert_in_notifier(vm);

	range_debug(range, "NOTIFIER");

	/* Skip if already unmapped or if no binding exists */
	if (range->base.flags.unmapped || !range->tile_present)
		return 0;

	range_debug(range, "NOTIFIER - EXECUTE");

	/* Adjust invalidation to range boundaries */
	*adj_start = min(xe_svm_range_start(range), mmu_range->start);
	*adj_end = max(xe_svm_range_end(range), mmu_range->end);

	/*
	 * XXX: Ideally would zap PTEs in one shot in xe_svm_invalidate but the
	 * invalidation code can't correctly cope with sparse ranges or
	 * invalidations spanning multiple ranges.
	 */
	for_each_tile(tile, xe, id)
		if (xe_pt_zap_ptes_range(tile, vm, range)) {
			tile_mask |= BIT(id);
			/*
			 * WRITE_ONCE pairs with READ_ONCE in
			 * xe_vm_has_valid_gpu_mapping()
			 */
			WRITE_ONCE(range->tile_invalidated,
				   range->tile_invalidated | BIT(id));
		}

	return tile_mask;
}

static void
xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
				const struct mmu_notifier_range *mmu_range)
{
	struct drm_gpusvm_ctx ctx = { .in_notifier = true, };

	xe_svm_assert_in_notifier(vm);

	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
						   mmu_range);
}

static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
			      struct drm_gpusvm_notifier *notifier,
			      const struct mmu_notifier_range *mmu_range)
{
	struct xe_vm *vm = gpusvm_to_vm(gpusvm);
	struct xe_device *xe = vm->xe;
	struct drm_gpusvm_range *r, *first;
	u64 adj_start = mmu_range->start, adj_end = mmu_range->end;
	u8 tile_mask = 0;
	long err;

	xe_svm_assert_in_notifier(vm);

	vm_dbg(&gpusvm_to_vm(gpusvm)->xe->drm,
	       "INVALIDATE: asid=%u, gpusvm=%p, seqno=%lu, start=0x%016lx, end=0x%016lx, event=%d",
	       vm->usm.asid, gpusvm, notifier->notifier.invalidate_seq,
	       mmu_range->start, mmu_range->end, mmu_range->event);

	/* Adjust invalidation to notifier boundaries */
	adj_start = max(drm_gpusvm_notifier_start(notifier), adj_start);
	adj_end = min(drm_gpusvm_notifier_end(notifier), adj_end);

	first = drm_gpusvm_range_find(notifier, adj_start, adj_end);
	if (!first)
		return;

	/*
	 * PTs may be getting destroyed so not safe to touch these but the PT
	 * should be invalidated at this point in time. Regardless we still
	 * need to ensure any DMA mappings are unmapped here.
	 */
	if (xe_vm_is_closed(vm))
		goto range_notifier_event_end;

	/*
	 * XXX: Less than ideal to always wait on VM's resv slots if an
	 * invalidation is not required. Could walk range list twice to figure
	 * out if an invalidation is needed, but that is also not ideal.
	 */
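	/* Wait for the VM's bookkeep fences (e.g. in-flight binds) to settle */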
	err = dma_resv_wait_timeout(xe_vm_resv(vm),
				    DMA_RESV_USAGE_BOOKKEEP,
				    false, MAX_SCHEDULE_TIMEOUT);
	XE_WARN_ON(err <= 0);

	r = first;
	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
		tile_mask |= xe_svm_range_notifier_event_begin(vm, r, mmu_range,
							       &adj_start,
							       &adj_end);
	if (!tile_mask)
		goto range_notifier_event_end;

	xe_device_wmb(xe);

	err = xe_vm_range_tilemask_tlb_invalidation(vm, adj_start, adj_end, tile_mask);
	WARN_ON_ONCE(err);

range_notifier_event_end:
	r = first;
	drm_gpusvm_for_each_range(r, notifier, adj_start, adj_end)
		xe_svm_range_notifier_event_end(vm, r, mmu_range);
}

static int __xe_svm_garbage_collector(struct xe_vm *vm,
				      struct xe_svm_range *range)
{
	struct dma_fence *fence;

	range_debug(range, "GARBAGE COLLECTOR");

	xe_vm_lock(vm, false);
	fence = xe_vm_range_unbind(vm, range);
	xe_vm_unlock(vm);
	if (IS_ERR(fence))
		return PTR_ERR(fence);
	dma_fence_put(fence);

	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);

	return 0;
}

static int xe_svm_garbage_collector(struct xe_vm *vm)
{
	struct xe_svm_range *range;
	int err;

	lockdep_assert_held_write(&vm->lock);

	if (xe_vm_is_closed_or_banned(vm))
		return -ENOENT;

	spin_lock(&vm->svm.garbage_collector.lock);
	for (;;) {
		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
						 typeof(*range),
						 garbage_collector_link);
		if (!range)
			break;

		list_del(&range->garbage_collector_link);
		spin_unlock(&vm->svm.garbage_collector.lock);

		err = __xe_svm_garbage_collector(vm, range);
		if (err) {
			drm_warn(&vm->xe->drm,
				 "Garbage collection failed: %pe\n",
				 ERR_PTR(err));
			xe_vm_kill(vm, true);
			return err;
		}

		spin_lock(&vm->svm.garbage_collector.lock);
	}
	spin_unlock(&vm->svm.garbage_collector.lock);

	return 0;
}

static void xe_svm_garbage_collector_work_func(struct work_struct *w)
{
	struct xe_vm *vm = container_of(w, struct xe_vm,
					svm.garbage_collector.work);

	down_write(&vm->lock);
	xe_svm_garbage_collector(vm);
	up_write(&vm->lock);
}

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)

static struct xe_vram_region *page_to_vr(struct page *page)
{
	return container_of(page_pgmap(page), struct xe_vram_region, pagemap);
}

static struct xe_tile *vr_to_tile(struct xe_vram_region *vr)
{
	return container_of(vr, struct xe_tile, mem.vram);
}

static u64 xe_vram_region_page_to_dpa(struct xe_vram_region *vr,
				      struct page *page)
{
	u64 dpa;
	struct xe_tile *tile = vr_to_tile(vr);
	u64 pfn = page_to_pfn(page);
	u64 offset;

	xe_tile_assert(tile, is_device_private_page(page));
	xe_tile_assert(tile, (pfn << PAGE_SHIFT) >= vr->hpa_base);

	offset = (pfn << PAGE_SHIFT) - vr->hpa_base;
	dpa = vr->dpa_base + offset;

	return dpa;
}

enum xe_svm_copy_dir {
	XE_SVM_COPY_TO_VRAM,
	XE_SVM_COPY_TO_SRAM,
};

static int xe_svm_copy(struct page **pages, dma_addr_t *dma_addr,
		       unsigned long npages, const enum xe_svm_copy_dir dir)
{
	struct xe_vram_region *vr = NULL;
	struct xe_tile *tile;
	struct dma_fence *fence = NULL;
	unsigned long i;
#define XE_VRAM_ADDR_INVALID	~0x0ull
	u64 vram_addr = XE_VRAM_ADDR_INVALID;
	int err = 0, pos = 0;
	bool sram = dir == XE_SVM_COPY_TO_SRAM;

	/*
	 * This flow is complex: it locates physically contiguous device pages,
	 * derives the starting physical address, and performs a single GPU
	 * copy for every 8M chunk in a DMA address array. Both device pages
	 * and DMA addresses may be sparsely populated. If either is NULL, a
	 * copy is triggered based on the current search state. The last GPU
	 * copy is waited on to ensure all copies are complete.
	 */
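
	/*
	 * Illustration (hypothetical layout): with dma_addr[0..3] populated
	 * and pages[0..2] physically contiguous in VRAM but pages[3] not,
	 * pos stays at 0 while i advances through 2 (match == true), the
	 * mismatch at i == 3 triggers a copy of the 3-page run starting at
	 * vram_addr, and the search restarts with pos = 3.
	 */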

	for (i = 0; i < npages; ++i) {
		struct page *spage = pages[i];
		struct dma_fence *__fence;
		u64 __vram_addr;
		bool match = false, chunk, last;

#define XE_MIGRATE_CHUNK_SIZE	SZ_8M
		chunk = (i - pos) == (XE_MIGRATE_CHUNK_SIZE / PAGE_SIZE);
		last = (i + 1) == npages;

		/* No CPU page and no device pages queued to copy */
		if (!dma_addr[i] && vram_addr == XE_VRAM_ADDR_INVALID)
			continue;

		if (!vr && spage) {
			vr = page_to_vr(spage);
			tile = vr_to_tile(vr);
		}
		XE_WARN_ON(spage && page_to_vr(spage) != vr);

		/*
		 * CPU page and device page valid, capture physical address on
		 * first device page, check if physically contiguous on
		 * subsequent device pages.
		 */
		if (dma_addr[i] && spage) {
			__vram_addr = xe_vram_region_page_to_dpa(vr, spage);
			if (vram_addr == XE_VRAM_ADDR_INVALID) {
				vram_addr = __vram_addr;
				pos = i;
			}

			match = vram_addr + PAGE_SIZE * (i - pos) == __vram_addr;
		}

		/*
		 * Mismatched physical address, 8M copy chunk, or last page -
		 * trigger a copy.
		 */
		if (!match || chunk || last) {
			/*
			 * Extra page for first copy if last page and matching
			 * physical address.
			 */
			int incr = (match && last) ? 1 : 0;

			if (vram_addr != XE_VRAM_ADDR_INVALID) {
				if (sram) {
					vm_dbg(&tile->xe->drm,
					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
					       vram_addr, (u64)dma_addr[pos], i - pos + incr);
					__fence = xe_migrate_from_vram(tile->migrate,
								       i - pos + incr,
								       vram_addr,
								       dma_addr + pos);
				} else {
					vm_dbg(&tile->xe->drm,
					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%ld",
					       (u64)dma_addr[pos], vram_addr, i - pos + incr);
					__fence = xe_migrate_to_vram(tile->migrate,
								     i - pos + incr,
								     dma_addr + pos,
								     vram_addr);
				}
				if (IS_ERR(__fence)) {
					err = PTR_ERR(__fence);
					goto err_out;
				}

				dma_fence_put(fence);
				fence = __fence;
			}

			/* Setup physical address of next device page */
			if (dma_addr[i] && spage) {
				vram_addr = __vram_addr;
				pos = i;
			} else {
				vram_addr = XE_VRAM_ADDR_INVALID;
			}

			/* Extra mismatched device page, copy it */
			if (!match && last && vram_addr != XE_VRAM_ADDR_INVALID) {
				if (sram) {
					vm_dbg(&tile->xe->drm,
					       "COPY TO SRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
					       vram_addr, (u64)dma_addr[pos], 1);
					__fence = xe_migrate_from_vram(tile->migrate, 1,
								       vram_addr,
								       dma_addr + pos);
				} else {
					vm_dbg(&tile->xe->drm,
					       "COPY TO VRAM - 0x%016llx -> 0x%016llx, NPAGES=%d",
					       (u64)dma_addr[pos], vram_addr, 1);
					__fence = xe_migrate_to_vram(tile->migrate, 1,
								     dma_addr + pos,
								     vram_addr);
				}
				if (IS_ERR(__fence)) {
					err = PTR_ERR(__fence);
					goto err_out;
				}

				dma_fence_put(fence);
				fence = __fence;
			}
		}
	}

err_out:
	/* Wait for all copies to complete */
	if (fence) {
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
	}

	return err;
#undef XE_MIGRATE_CHUNK_SIZE
#undef XE_VRAM_ADDR_INVALID
}
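
/*
 * The two direction-specific wrappers below are wired into
 * dpagemap_devmem_ops and are invoked by the drm_pagemap layer whenever data
 * has to be moved between system memory and VRAM.
 */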

static int xe_svm_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
				 unsigned long npages)
{
	return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_VRAM);
}

static int xe_svm_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
			      unsigned long npages)
{
	return xe_svm_copy(pages, dma_addr, npages, XE_SVM_COPY_TO_SRAM);
}

static struct xe_bo *to_xe_bo(struct drm_pagemap_devmem *devmem_allocation)
{
	return container_of(devmem_allocation, struct xe_bo, devmem_allocation);
}

static void xe_svm_devmem_release(struct drm_pagemap_devmem *devmem_allocation)
{
	struct xe_bo *bo = to_xe_bo(devmem_allocation);

	xe_bo_put_async(bo);
}

static u64 block_offset_to_pfn(struct xe_vram_region *vr, u64 offset)
{
	return PHYS_PFN(offset + vr->hpa_base);
}

static struct drm_buddy *tile_to_buddy(struct xe_tile *tile)
{
	return &tile->mem.vram.ttm.mm;
}

static int xe_svm_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
				      unsigned long npages, unsigned long *pfn)
{
	struct xe_bo *bo = to_xe_bo(devmem_allocation);
	struct ttm_resource *res = bo->ttm.resource;
	struct list_head *blocks = &to_xe_ttm_vram_mgr_resource(res)->blocks;
	struct drm_buddy_block *block;
	int j = 0;

	list_for_each_entry(block, blocks, link) {
		struct xe_vram_region *vr = block->private;
		struct xe_tile *tile = vr_to_tile(vr);
		struct drm_buddy *buddy = tile_to_buddy(tile);
		u64 block_pfn = block_offset_to_pfn(vr, drm_buddy_block_offset(block));
		int i;

		for (i = 0; i < drm_buddy_block_size(buddy, block) >> PAGE_SHIFT; ++i)
			pfn[j++] = block_pfn + i;
	}

	return 0;
}

static const struct drm_pagemap_devmem_ops dpagemap_devmem_ops = {
	.devmem_release = xe_svm_devmem_release,
	.populate_devmem_pfn = xe_svm_populate_devmem_pfn,
	.copy_to_devmem = xe_svm_copy_to_devmem,
	.copy_to_ram = xe_svm_copy_to_ram,
};

#endif

static const struct drm_gpusvm_ops gpusvm_ops = {
	.range_alloc = xe_svm_range_alloc,
	.range_free = xe_svm_range_free,
	.invalidate = xe_svm_invalidate,
};

static const unsigned long fault_chunk_sizes[] = {
	SZ_2M,
	SZ_64K,
	SZ_4K,
};
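
/*
 * Rough lifecycle of the SVM state embedded in a VM, as driven by the VM
 * create/close/destroy paths: xe_svm_init() sets it up, xe_svm_close() stops
 * and flushes outstanding SVM work once the VM is closed, and xe_svm_fini()
 * tears it down.
 */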

/**
 * xe_svm_init() - SVM initialize
 * @vm: The VM.
 *
 * Initialize SVM state which is embedded within the VM.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_svm_init(struct xe_vm *vm)
{
	int err;

	spin_lock_init(&vm->svm.garbage_collector.lock);
	INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
	INIT_WORK(&vm->svm.garbage_collector.work,
		  xe_svm_garbage_collector_work_func);

	err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
			      current->mm, xe_svm_devm_owner(vm->xe), 0,
			      vm->size, xe_modparam.svm_notifier_size * SZ_1M,
			      &gpusvm_ops, fault_chunk_sizes,
			      ARRAY_SIZE(fault_chunk_sizes));
	if (err)
		return err;

	drm_gpusvm_driver_set_lock(&vm->svm.gpusvm, &vm->lock);

	return 0;
}

/**
 * xe_svm_close() - SVM close
 * @vm: The VM.
 *
 * Close SVM state (i.e., stop and flush all SVM actions).
 */
void xe_svm_close(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_is_closed(vm));
	flush_work(&vm->svm.garbage_collector.work);
}

/**
 * xe_svm_fini() - SVM finalize
 * @vm: The VM.
 *
 * Finalize SVM state which is embedded within the VM.
 */
void xe_svm_fini(struct xe_vm *vm)
{
	xe_assert(vm->xe, xe_vm_is_closed(vm));

	drm_gpusvm_fini(&vm->svm.gpusvm);
}

static bool xe_svm_range_is_valid(struct xe_svm_range *range,
				  struct xe_tile *tile,
				  bool devmem_only)
{
	return (xe_vm_has_valid_gpu_mapping(tile, range->tile_present,
					    range->tile_invalidated) &&
		(!devmem_only || xe_svm_range_in_vram(range)));
}

/**
 * xe_svm_range_migrate_to_smem() - Move range pages from VRAM to SMEM
 * @vm: xe_vm pointer
 * @range: Pointer to the SVM range structure
 *
 * xe_svm_range_migrate_to_smem() checks whether the range has pages in VRAM
 * and, if so, migrates them to SMEM.
 */
void xe_svm_range_migrate_to_smem(struct xe_vm *vm, struct xe_svm_range *range)
{
	if (xe_svm_range_in_vram(range))
		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
}

/**
 * xe_svm_range_validate() - Check if the SVM range is valid
 * @vm: xe_vm pointer
 * @range: Pointer to the SVM range structure
 * @tile_mask: Mask representing the tiles to be checked
 * @devmem_preferred: if true range needs to be in devmem
 *
 * The xe_svm_range_validate() function checks if a range is
 * valid and located in the desired memory region.
 *
 * Return: true if the range is valid, false otherwise
 */
bool xe_svm_range_validate(struct xe_vm *vm,
			   struct xe_svm_range *range,
			   u8 tile_mask, bool devmem_preferred)
{
	bool ret;

	xe_svm_notifier_lock(vm);

	ret = (range->tile_present & ~range->tile_invalidated & tile_mask) == tile_mask &&
	      (devmem_preferred == range->base.flags.has_devmem_pages);

	xe_svm_notifier_unlock(vm);

	return ret;
}

/**
 * xe_svm_find_vma_start - Find start of CPU VMA
 * @vm: xe_vm pointer
 * @start: start address
 * @end: end address
 * @vma: Pointer to struct xe_vma
 *
 * This function searches for a CPU VMA within the specified
 * range [start, end] in the given VM. It adjusts the range based on the
 * xe_vma start and end addresses. If no CPU VMA is found, it returns ULONG_MAX.
 *
 * Return: The starting address of the VMA within the range,
 * or ULONG_MAX if no VMA is found
 */
u64 xe_svm_find_vma_start(struct xe_vm *vm, u64 start, u64 end, struct xe_vma *vma)
{
	return drm_gpusvm_find_vma_start(&vm->svm.gpusvm,
					 max(start, xe_vma_start(vma)),
					 min(end, xe_vma_end(vma)));
}

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)
static struct xe_vram_region *tile_to_vr(struct xe_tile *tile)
{
	return &tile->mem.vram;
}

/**
 * xe_svm_alloc_vram() - Allocate device memory pages for range,
 * migrating existing data.
 * @vm: The VM.
 * @tile: tile to allocate vram from
 * @range: SVM range
 * @ctx: DRM GPU SVM context
 *
 * Return: 0 on success, error code on failure.
 */
int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
{
	struct mm_struct *mm = vm->svm.gpusvm.mm;
	struct xe_vram_region *vr = tile_to_vr(tile);
	struct drm_buddy_block *block;
	struct list_head *blocks;
	struct xe_bo *bo;
	ktime_t end = 0;
	int err;

	if (!range->base.flags.migrate_devmem)
		return -EINVAL;

	range_debug(range, "ALLOCATE VRAM");

	if (!mmget_not_zero(mm))
		return -EFAULT;
	mmap_read_lock(mm);

retry:
	bo = xe_bo_create_locked(tile_to_xe(tile), NULL, NULL,
				 xe_svm_range_size(range),
				 ttm_bo_type_device,
				 XE_BO_FLAG_VRAM_IF_DGFX(tile) |
				 XE_BO_FLAG_CPU_ADDR_MIRROR);
	if (IS_ERR(bo)) {
		err = PTR_ERR(bo);
		if (xe_vm_validate_should_retry(NULL, err, &end))
			goto retry;
		goto unlock;
	}

	drm_pagemap_devmem_init(&bo->devmem_allocation,
				vm->xe->drm.dev, mm,
				&dpagemap_devmem_ops,
				&tile->mem.vram.dpagemap,
				xe_svm_range_size(range));

	blocks = &to_xe_ttm_vram_mgr_resource(bo->ttm.resource)->blocks;
	list_for_each_entry(block, blocks, link)
		block->private = vr;

	xe_bo_get(bo);
	err = drm_pagemap_migrate_to_devmem(&bo->devmem_allocation,
					    mm,
					    xe_svm_range_start(range),
					    xe_svm_range_end(range),
					    ctx->timeslice_ms,
					    xe_svm_devm_owner(vm->xe));
	if (err)
		xe_svm_devmem_release(&bo->devmem_allocation);

	xe_bo_unlock(bo);
	xe_bo_put(bo);

unlock:
	mmap_read_unlock(mm);
	mmput(mm);

	return err;
}
#endif

static bool supports_4K_migration(struct xe_device *xe)
{
	if (xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K)
		return false;

	return true;
}

/**
 * xe_svm_range_needs_migrate_to_vram() - Check if SVM range needs migration to VRAM
 * @range: SVM range for which migration needs to be decided
 * @vma: vma which has range
 * @preferred_region_is_vram: preferred region for range is vram
 *
 * Return: True if the range needs migration and migration is supported,
 * false otherwise
 */
bool xe_svm_range_needs_migrate_to_vram(struct xe_svm_range *range, struct xe_vma *vma,
					bool preferred_region_is_vram)
{
	struct xe_vm *vm = range_to_vm(&range->base);
	u64 range_size = xe_svm_range_size(range);

	if (!range->base.flags.migrate_devmem || !preferred_region_is_vram)
		return false;

	xe_assert(vm->xe, IS_DGFX(vm->xe));

	if (preferred_region_is_vram && xe_svm_range_in_vram(range)) {
		drm_info(&vm->xe->drm, "Range is already in VRAM\n");
		return false;
	}

	if (preferred_region_is_vram && range_size < SZ_64K && !supports_4K_migration(vm->xe)) {
		drm_dbg(&vm->xe->drm, "Platform doesn't support SZ_4K range migration\n");
		return false;
	}

	return true;
}

/**
 * xe_svm_handle_pagefault() - SVM handle page fault
 * @vm: The VM.
 * @vma: The CPU address mirror VMA.
 * @gt: The gt upon which the fault occurred.
 * @fault_addr: The GPU fault address.
 * @atomic: The fault atomic access bit.
 *
 * Create GPU bindings for a SVM page fault. Optionally migrate to device
 * memory.
 *
 * Return: 0 on success, negative error code on error.
 */
int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
			    struct xe_gt *gt, u64 fault_addr,
			    bool atomic)
{
	struct drm_gpusvm_ctx ctx = {
		.read_only = xe_vma_read_only(vma),
		.devmem_possible = IS_DGFX(vm->xe) &&
			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
		.check_pages_threshold = IS_DGFX(vm->xe) &&
			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ? SZ_64K : 0,
		.devmem_only = atomic && IS_DGFX(vm->xe) &&
			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP),
		.timeslice_ms = atomic && IS_DGFX(vm->xe) &&
			IS_ENABLED(CONFIG_DRM_XE_PAGEMAP) ?
			vm->xe->atomic_svm_timeslice_ms : 0,
	};
	struct xe_svm_range *range;
	struct dma_fence *fence;
	struct xe_tile *tile = gt_to_tile(gt);
	int migrate_try_count = ctx.devmem_only ? 3 : 1;
	ktime_t end = 0;
	int err;

	lockdep_assert_held_write(&vm->lock);
	xe_assert(vm->xe, xe_vma_is_cpu_addr_mirror(vma));

	xe_gt_stats_incr(gt, XE_GT_STATS_ID_SVM_PAGEFAULT_COUNT, 1);
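
	/*
	 * Overall flow: collect garbage from unmapped ranges, look up (or
	 * create) the range covering the fault address, optionally migrate it
	 * to VRAM, grab and DMA-map its pages, then (re)bind it. Transient
	 * failures loop back to the retry label; ctx.timeslice_ms is doubled
	 * when a retry is needed, and VRAM migration is attempted at most
	 * migrate_try_count times.
	 */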

retry:
	/* Always process UNMAPs first so the view of SVM ranges is current */
	err = xe_svm_garbage_collector(vm);
	if (err)
		return err;

	range = xe_svm_range_find_or_insert(vm, fault_addr, vma, &ctx);

	if (IS_ERR(range))
		return PTR_ERR(range);

	if (ctx.devmem_only && !range->base.flags.migrate_devmem)
		return -EACCES;

	if (xe_svm_range_is_valid(range, tile, ctx.devmem_only))
		return 0;

	range_debug(range, "PAGE FAULT");

	if (--migrate_try_count >= 0 &&
	    xe_svm_range_needs_migrate_to_vram(range, vma, IS_DGFX(vm->xe))) {
		err = xe_svm_alloc_vram(vm, tile, range, &ctx);
		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
		if (err) {
			if (migrate_try_count || !ctx.devmem_only) {
				drm_dbg(&vm->xe->drm,
					"VRAM allocation failed, falling back to retrying fault, asid=%u, errno=%pe\n",
					vm->usm.asid, ERR_PTR(err));
				goto retry;
			} else {
				drm_err(&vm->xe->drm,
					"VRAM allocation failed, retry count exceeded, asid=%u, errno=%pe\n",
					vm->usm.asid, ERR_PTR(err));
				return err;
			}
		}
	}

	range_debug(range, "GET PAGES");
	err = xe_svm_range_get_pages(vm, range, &ctx);
	/* Corner case where CPU mappings have changed */
	if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {
		ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
		if (migrate_try_count > 0 || !ctx.devmem_only) {
			drm_dbg(&vm->xe->drm,
				"Get pages failed, falling back to retrying, asid=%u, gpusvm=%p, errno=%pe\n",
				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
			range_debug(range, "PAGE FAULT - RETRY PAGES");
			goto retry;
		} else {
			drm_err(&vm->xe->drm,
				"Get pages failed, retry count exceeded, asid=%u, gpusvm=%p, errno=%pe\n",
				vm->usm.asid, &vm->svm.gpusvm, ERR_PTR(err));
		}
	}
	if (err) {
		range_debug(range, "PAGE FAULT - FAIL PAGE COLLECT");
		goto err_out;
	}

	range_debug(range, "PAGE FAULT - BIND");

retry_bind:
	xe_vm_lock(vm, false);
	fence = xe_vm_range_rebind(vm, vma, range, BIT(tile->id));
	if (IS_ERR(fence)) {
		xe_vm_unlock(vm);
		err = PTR_ERR(fence);
		if (err == -EAGAIN) {
			ctx.timeslice_ms <<= 1;	/* Double timeslice if we have to retry */
			range_debug(range, "PAGE FAULT - RETRY BIND");
			goto retry;
		}
		if (xe_vm_validate_should_retry(NULL, err, &end))
			goto retry_bind;
		goto err_out;
	}
	xe_vm_unlock(vm);

	dma_fence_wait(fence, false);
	dma_fence_put(fence);

err_out:

	return err;
}

/**
 * xe_svm_has_mapping() - SVM has mappings
 * @vm: The VM.
 * @start: Start address.
 * @end: End address.
 *
 * Check if an address range has SVM mappings.
 *
 * Return: True if address range has a SVM mapping, False otherwise
 */
bool xe_svm_has_mapping(struct xe_vm *vm, u64 start, u64 end)
{
	return drm_gpusvm_has_mapping(&vm->svm.gpusvm, start, end);
}

/**
 * xe_svm_bo_evict() - SVM evict BO to system memory
 * @bo: BO to evict
 *
 * SVM evict BO to system memory. GPU SVM layer ensures all device pages
 * are evicted before returning.
 *
 * Return: 0 on success, standard error code otherwise
 */
int xe_svm_bo_evict(struct xe_bo *bo)
{
	return drm_pagemap_evict_to_ram(&bo->devmem_allocation);
}

/**
 * xe_svm_range_find_or_insert() - Find or insert GPU SVM range
 * @vm: xe_vm pointer
 * @addr: address for which range needs to be found/inserted
 * @vma: Pointer to struct xe_vma which mirrors CPU
 * @ctx: GPU SVM context
 *
 * This function finds or inserts a newly allocated SVM range based on the
 * address.
 *
 * Return: Pointer to the SVM range on success, ERR_PTR() on failure.
 */
struct xe_svm_range *xe_svm_range_find_or_insert(struct xe_vm *vm, u64 addr,
						 struct xe_vma *vma, struct drm_gpusvm_ctx *ctx)
{
	struct drm_gpusvm_range *r;

	r = drm_gpusvm_range_find_or_insert(&vm->svm.gpusvm, max(addr, xe_vma_start(vma)),
					    xe_vma_start(vma), xe_vma_end(vma), ctx);
	if (IS_ERR(r))
		return ERR_PTR(PTR_ERR(r));

	return to_xe_range(r);
}

/**
 * xe_svm_range_get_pages() - Get pages for a SVM range
 * @vm: Pointer to the struct xe_vm
 * @range: Pointer to the xe SVM range structure
 * @ctx: GPU SVM context
 *
 * This function gets pages for a SVM range and ensures they are mapped for
 * DMA access. In case of failure with -EOPNOTSUPP, it evicts the range.
 *
 * Return: 0 on success, negative error code on failure.
 */
int xe_svm_range_get_pages(struct xe_vm *vm, struct xe_svm_range *range,
			   struct drm_gpusvm_ctx *ctx)
{
	int err = 0;

	err = drm_gpusvm_range_get_pages(&vm->svm.gpusvm, &range->base, ctx);
	if (err == -EOPNOTSUPP) {
		range_debug(range, "PAGE FAULT - EVICT PAGES");
		drm_gpusvm_range_evict(&vm->svm.gpusvm, &range->base);
	}

	return err;
}

#if IS_ENABLED(CONFIG_DRM_XE_PAGEMAP)

static struct drm_pagemap_device_addr
xe_drm_pagemap_device_map(struct drm_pagemap *dpagemap,
			  struct device *dev,
			  struct page *page,
			  unsigned int order,
			  enum dma_data_direction dir)
{
	struct device *pgmap_dev = dpagemap->dev;
	enum drm_interconnect_protocol prot;
	dma_addr_t addr;

	if (pgmap_dev == dev) {
		addr = xe_vram_region_page_to_dpa(page_to_vr(page), page);
		prot = XE_INTERCONNECT_VRAM;
	} else {
		addr = DMA_MAPPING_ERROR;
		prot = 0;
	}

	return drm_pagemap_device_addr_encode(addr, prot, order, dir);
}

static const struct drm_pagemap_ops xe_drm_pagemap_ops = {
	.device_map = xe_drm_pagemap_device_map,
};

/**
 * xe_devm_add() - Remap and provide memmap backing for device memory
 * @tile: tile that the memory region belongs to
 * @vr: vram memory region to remap
 *
 * This remaps device memory to the host physical address space and creates
 * struct pages to back device memory.
 *
 * Return: 0 on success, standard error code otherwise
 */
int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	struct xe_device *xe = tile_to_xe(tile);
	struct device *dev = &to_pci_dev(xe->drm.dev)->dev;
	struct resource *res;
	void *addr;
	int ret;

	res = devm_request_free_mem_region(dev, &iomem_resource,
					   vr->usable_size);
	if (IS_ERR(res)) {
		ret = PTR_ERR(res);
		return ret;
	}

	vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
	vr->pagemap.range.start = res->start;
	vr->pagemap.range.end = res->end;
	vr->pagemap.nr_range = 1;
	vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
	vr->pagemap.owner = xe_svm_devm_owner(xe);
	addr = devm_memremap_pages(dev, &vr->pagemap);

	vr->dpagemap.dev = dev;
	vr->dpagemap.ops = &xe_drm_pagemap_ops;

	if (IS_ERR(addr)) {
		devm_release_mem_region(dev, res->start, resource_size(res));
		ret = PTR_ERR(addr);
		drm_err(&xe->drm, "Failed to remap tile %d memory, errno %pe\n",
			tile->id, ERR_PTR(ret));
		return ret;
	}
	vr->hpa_base = res->start;

	drm_dbg(&xe->drm, "Added tile %d memory [%llx-%llx] to devm, remapped to %pr\n",
		tile->id, vr->io_start, vr->io_start + vr->usable_size, res);
	return 0;
}
#else
int xe_svm_alloc_vram(struct xe_vm *vm, struct xe_tile *tile,
		      struct xe_svm_range *range,
		      const struct drm_gpusvm_ctx *ctx)
{
	return -EOPNOTSUPP;
}

int xe_devm_add(struct xe_tile *tile, struct xe_vram_region *vr)
{
	return 0;
}
#endif

/**
 * xe_svm_flush() - SVM flush
 * @vm: The VM.
 *
 * Flush all SVM actions.
 */
void xe_svm_flush(struct xe_vm *vm)
{
	if (xe_vm_in_fault_mode(vm))
		flush_work(&vm->svm.garbage_collector.work);
}