// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright © 2024-2025 Intel Corporation
 */

#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <drm/drm_drv.h>
#include <drm/drm_pagemap.h>
#include <drm/drm_pagemap_util.h>
#include <drm/drm_print.h>

/**
 * DOC: Overview
 *
 * The DRM pagemap layer is intended to augment the dev_pagemap functionality by
 * providing a way to populate a struct mm_struct virtual range with device
 * private pages and to provide helpers to abstract device memory allocations,
 * to migrate memory back and forth between device memory and system RAM and
 * to handle access (and in the future migration) between devices implementing
 * a fast interconnect that is not necessarily visible to the rest of the
 * system.
 *
 * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
 * instances to populate struct mm_struct virtual ranges with memory. The
 * migration is best effort only and may thus fail. The implementation should
 * also handle device unbinding by blocking (returning an -ENODEV error) new
 * population requests and after that migrating all device pages to system ram.
 */

/**
 * DOC: Migration
 *
 * Migration granularity typically follows the GPU SVM range requests, but
 * if there are clashes, due to races or due to the fact that multiple GPU
 * SVM instances have different views of the ranges used, and because of that
 * parts of a requested range are already present in the requested device memory,
 * the implementation has a variety of options. It can fail, it can choose
 * to populate only the part of the range that isn't already in device memory,
 * or it can evict the range to system memory before trying to migrate. Ideally an
 * implementation would just try to migrate the missing part of the range and
 * allocate just enough memory to do so.
 *
 * When migrating to system memory as a response to a CPU fault or a device
 * memory eviction request, currently a full device memory allocation is
 * migrated back to system memory. Moving forward this might need improvement for
 * situations where a single page needs bouncing between system memory and
 * device memory due to, for example, atomic operations.
 *
 * Key DRM pagemap components:
 *
 * - Device Memory Allocations:
 *	Embedded structure containing enough information for the drm_pagemap to
 *	migrate to / from device memory.
 *
 * - Device Memory Operations:
 *	Define the interface for driver-specific device memory operations to
 *	release memory, populate pfns, and copy to / from device memory.
 */
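
/*
 * Illustrative only: a rough sketch of how a driver-side GPU SVM fault
 * handler might drive this layer. The my_*() helpers and structures are
 * hypothetical stand-ins for driver code; only the drm_pagemap_* calls and
 * the mdetails fields below are defined by this layer.
 *
 *	static int my_handle_gpu_fault(struct my_vram *vram, struct mm_struct *mm,
 *				       unsigned long start, unsigned long end)
 *	{
 *		struct drm_pagemap_migrate_details mdetails = {
 *			.timeslice_ms = 5,	// let the GPU make some progress
 *		};
 *		struct drm_pagemap_devmem *devmem;
 *
 *		// Driver-specific allocation of the device memory backing.
 *		devmem = my_alloc_devmem(vram, end - start);
 *		if (IS_ERR(devmem))
 *			return PTR_ERR(devmem);
 *
 *		// Best-effort migration of the virtual range to device memory.
 *		// The caller holds the mmap lock in at least read mode, and the
 *		// reference on devmem is consumed even on error.
 *		return drm_pagemap_migrate_to_devmem(devmem, mm, start, end,
 *						     &mdetails);
 *	}
 *
 * CPU faults on the resulting device-private pages are later routed back to
 * system RAM through the migrate_to_ram() callback installed via
 * drm_pagemap_pagemap_ops_get().
 */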

/**
 * struct drm_pagemap_zdd - GPU SVM zone device data
 *
 * @refcount: Reference count for the zdd
 * @devmem_allocation: device memory allocation
 * @dpagemap: Refcounted pointer to the underlying struct drm_pagemap.
 *
 * This structure serves as a generic wrapper installed in
 * page->zone_device_data. It provides infrastructure for looking up a device
 * memory allocation upon CPU page fault and asynchronously releasing device
 * memory once the CPU has no page references. Asynchronous release is useful
 * because CPU page references can be dropped in IRQ contexts, while releasing
 * device memory likely requires sleeping locks.
 */
struct drm_pagemap_zdd {
	struct kref refcount;
	struct drm_pagemap_devmem *devmem_allocation;
	struct drm_pagemap *dpagemap;
};

/**
 * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
 * @dpagemap: Pointer to the underlying struct drm_pagemap.
 *
 * This function allocates and initializes a new zdd structure. It sets up the
 * reference count and takes a reference on @dpagemap.
 *
 * Return: Pointer to the allocated zdd on success, NULL on failure.
 */
static struct drm_pagemap_zdd *
drm_pagemap_zdd_alloc(struct drm_pagemap *dpagemap)
{
	struct drm_pagemap_zdd *zdd;

	zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
	if (!zdd)
		return NULL;

	kref_init(&zdd->refcount);
	zdd->devmem_allocation = NULL;
	zdd->dpagemap = drm_pagemap_get(dpagemap);

	return zdd;
}

/**
 * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
 * @zdd: Pointer to the zdd structure.
 *
 * This function increments the reference count of the provided zdd structure.
 *
 * Return: Pointer to the zdd structure.
 */
static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
{
	kref_get(&zdd->refcount);
	return zdd;
}

/**
 * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
 * @ref: Pointer to the reference count structure.
 *
 * This function signals detachment of the device memory allocation, if any,
 * calls its devmem_release() op, and frees the zdd.
 */
static void drm_pagemap_zdd_destroy(struct kref *ref)
{
	struct drm_pagemap_zdd *zdd =
		container_of(ref, struct drm_pagemap_zdd, refcount);
	struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;
	struct drm_pagemap *dpagemap = zdd->dpagemap;

	if (devmem) {
		complete_all(&devmem->detached);
		if (devmem->ops->devmem_release)
			devmem->ops->devmem_release(devmem);
	}
	kfree(zdd);
	drm_pagemap_put(dpagemap);
}

/**
 * drm_pagemap_zdd_put() - Put a zdd reference.
 * @zdd: Pointer to the zdd structure.
 *
 * This function decrements the reference count of the provided zdd structure
 * and destroys it if the count drops to zero.
 */
static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
{
	kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
}
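
/*
 * Reference-count model, for illustration: each device-private page populated
 * from a devmem allocation holds a zdd reference, taken in
 * drm_pagemap_get_devmem_page() below and dropped by the dev_pagemap
 * folio_free() callback. A hypothetical trace for a single page:
 *
 *	zdd = drm_pagemap_zdd_alloc(dpagemap);	// refcount == 1
 *	drm_pagemap_get_devmem_page(page, zdd);	// refcount == 2, page owns one
 *	...
 *	drm_pagemap_zdd_put(zdd);		// migration path drops its ref
 *	...
 *	// CPU drops the last page reference -> folio_free() ->
 *	drm_pagemap_zdd_put(zdd);		// refcount == 0 -> devmem_release()
 */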

/**
 * drm_pagemap_migration_unlock_put_page() - Put a migration page
 * @page: Pointer to the page to put
 *
 * This function unlocks and puts a page.
 */
static void drm_pagemap_migration_unlock_put_page(struct page *page)
{
	unlock_page(page);
	put_page(page);
}

/**
 * drm_pagemap_migration_unlock_put_pages() - Put migration pages
 * @npages: Number of pages
 * @migrate_pfn: Array of migrate page frame numbers
 *
 * This function unlocks and puts an array of pages.
 */
static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
						   unsigned long *migrate_pfn)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		struct page *page;

		if (!migrate_pfn[i])
			continue;

		page = migrate_pfn_to_page(migrate_pfn[i]);
		drm_pagemap_migration_unlock_put_page(page);
		migrate_pfn[i] = 0;
	}
}

/**
 * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
 * @page: Pointer to the page
 * @zdd: Pointer to the GPU SVM zone device data
 *
 * This function associates the given page with the specified GPU SVM zone
 * device data and initializes it for zone device usage.
 */
static void drm_pagemap_get_devmem_page(struct page *page,
					struct drm_pagemap_zdd *zdd)
{
	page->zone_device_data = drm_pagemap_zdd_get(zdd);
	zone_device_page_init(page, page_pgmap(page), 0);
}

/**
 * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
 * @dev: The device performing the migration.
 * @local_dpagemap: The drm_pagemap local to the migrating device.
 * @pagemap_addr: Array to store DMA information corresponding to mapped pages.
 * @migrate_pfn: Array of page frame numbers of system pages or peer pages to map.
 * @npages: Number of system pages or peer pages to map.
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 * @mdetails: Details governing the migration behaviour.
 *
 * This function maps pages of memory for migration usage in GPU SVM. It
 * iterates over each page frame number provided in @migrate_pfn, maps the
 * corresponding page, and stores the DMA information in the provided
 * @pagemap_addr array.
 *
 * Returns: 0 on success, -EFAULT if an error occurs during mapping.
 */
static int drm_pagemap_migrate_map_pages(struct device *dev,
					 struct drm_pagemap *local_dpagemap,
					 struct drm_pagemap_addr *pagemap_addr,
					 unsigned long *migrate_pfn,
					 unsigned long npages,
					 enum dma_data_direction dir,
					 const struct drm_pagemap_migrate_details *mdetails)
{
	unsigned long num_peer_pages = 0, num_local_pages = 0, i;

	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
		dma_addr_t dma_addr;
		struct folio *folio;
		unsigned int order = 0;

		if (!page)
			goto next;

		folio = page_folio(page);
		order = folio_order(folio);

		if (is_device_private_page(page)) {
			struct drm_pagemap_zdd *zdd = page->zone_device_data;
			struct drm_pagemap *dpagemap = zdd->dpagemap;
			struct drm_pagemap_addr addr;

			if (dpagemap == local_dpagemap) {
				if (!mdetails->can_migrate_same_pagemap)
					goto next;

				num_local_pages += NR_PAGES(order);
			} else {
				num_peer_pages += NR_PAGES(order);
			}

			addr = dpagemap->ops->device_map(dpagemap, dev, page, order, dir);
			if (dma_mapping_error(dev, addr.addr))
				return -EFAULT;

			pagemap_addr[i] = addr;
		} else {
			dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
			if (dma_mapping_error(dev, dma_addr))
				return -EFAULT;

			pagemap_addr[i] =
				drm_pagemap_addr_encode(dma_addr,
							DRM_INTERCONNECT_SYSTEM,
							order, dir);
		}

next:
		i += NR_PAGES(order);
	}

	if (num_peer_pages)
		drm_dbg(local_dpagemap->drm, "Migrating %lu peer pages over interconnect.\n",
			num_peer_pages);
	if (num_local_pages)
		drm_dbg(local_dpagemap->drm, "Migrating %lu local pages over interconnect.\n",
			num_local_pages);

	return 0;
}
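
/*
 * Illustrative only: one possible shape for the &drm_pagemap_ops.device_map
 * callback consumed above. Everything prefixed my_ is hypothetical driver
 * code, and MY_INTERCONNECT_VRAM stands in for a driver-private interconnect
 * protocol value; the authoritative prototype lives in drm_pagemap.h.
 *
 *	static struct drm_pagemap_addr
 *	my_pagemap_device_map(struct drm_pagemap *dpagemap, struct device *dev,
 *			      struct page *page, unsigned int order,
 *			      enum dma_data_direction dir)
 *	{
 *		dma_addr_t addr;
 *
 *		if (!my_device_has_interconnect(dpagemap, dev))
 *			// Signal a mapping error; the migration then fails
 *			// with -EFAULT instead of using the interconnect.
 *			addr = DMA_MAPPING_ERROR;
 *		else
 *			// Device-local offset of the private page in VRAM.
 *			addr = my_page_to_vram_offset(page);
 *
 *		return drm_pagemap_addr_encode(addr, MY_INTERCONNECT_VRAM,
 *					       order, dir);
 *	}
 */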

/**
 * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
 * @dev: The device for which the pages were mapped
 * @pagemap_addr: Array of DMA information corresponding to mapped pages
 * @migrate_pfn: Array of migrate pfns set up for the mapped pages. Used to
 * determine the drm_pagemap of a peer device private page.
 * @npages: Number of pages to unmap
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function unmaps previously mapped pages of memory for GPU Shared Virtual
 * Memory (SVM). It iterates over each entry provided in @pagemap_addr, checks
 * if it's valid and not already unmapped, and unmaps the corresponding page.
 */
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
					    struct drm_pagemap_addr *pagemap_addr,
					    unsigned long *migrate_pfn,
					    unsigned long npages,
					    enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);

		if (!page || !pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
			goto next;

		if (is_zone_device_page(page)) {
			struct drm_pagemap_zdd *zdd = page->zone_device_data;
			struct drm_pagemap *dpagemap = zdd->dpagemap;

			dpagemap->ops->device_unmap(dpagemap, dev, &pagemap_addr[i]);
		} else {
			dma_unmap_page(dev, pagemap_addr[i].addr,
				       PAGE_SIZE << pagemap_addr[i].order, dir);
		}

next:
		i += NR_PAGES(pagemap_addr[i].order);
	}
}

static unsigned long
npages_in_range(unsigned long start, unsigned long end)
{
	return (end - start) >> PAGE_SHIFT;
}

static int
drm_pagemap_migrate_remote_to_local(struct drm_pagemap_devmem *devmem,
				    struct device *remote_device,
				    struct drm_pagemap *remote_dpagemap,
				    unsigned long local_pfns[],
				    struct page *remote_pages[],
				    struct drm_pagemap_addr pagemap_addr[],
				    unsigned long npages,
				    const struct drm_pagemap_devmem_ops *ops,
				    const struct drm_pagemap_migrate_details *mdetails)
{
	int err = drm_pagemap_migrate_map_pages(remote_device, remote_dpagemap,
						pagemap_addr, local_pfns,
						npages, DMA_FROM_DEVICE, mdetails);

	if (err)
		goto out;

	err = ops->copy_to_ram(remote_pages, pagemap_addr, npages,
			       devmem->pre_migrate_fence);
out:
	drm_pagemap_migrate_unmap_pages(remote_device, pagemap_addr, local_pfns,
					npages, DMA_FROM_DEVICE);
	return err;
}

static int
drm_pagemap_migrate_sys_to_dev(struct drm_pagemap_devmem *devmem,
			       unsigned long sys_pfns[],
			       struct page *local_pages[],
			       struct drm_pagemap_addr pagemap_addr[],
			       unsigned long npages,
			       const struct drm_pagemap_devmem_ops *ops,
			       const struct drm_pagemap_migrate_details *mdetails)
{
	int err = drm_pagemap_migrate_map_pages(devmem->dev, devmem->dpagemap,
						pagemap_addr, sys_pfns, npages,
						DMA_TO_DEVICE, mdetails);

	if (err)
		goto out;

	err = ops->copy_to_devmem(local_pages, pagemap_addr, npages,
				  devmem->pre_migrate_fence);
out:
	drm_pagemap_migrate_unmap_pages(devmem->dev, pagemap_addr, sys_pfns, npages,
					DMA_TO_DEVICE);
	return err;
}

/**
 * struct migrate_range_loc - Cursor into the loop over migrate_pfns for migrating to
 * device.
 * @start: The current loop index.
 * @device: migrating device.
 * @dpagemap: Pointer to struct drm_pagemap used by the migrating device.
 * @ops: The copy ops to be used for the migrating device.
 */
struct migrate_range_loc {
	unsigned long start;
	struct device *device;
	struct drm_pagemap *dpagemap;
	const struct drm_pagemap_devmem_ops *ops;
};

static int drm_pagemap_migrate_range(struct drm_pagemap_devmem *devmem,
				     unsigned long src_pfns[],
				     unsigned long dst_pfns[],
				     struct page *pages[],
				     struct drm_pagemap_addr pagemap_addr[],
				     struct migrate_range_loc *last,
				     const struct migrate_range_loc *cur,
				     const struct drm_pagemap_migrate_details *mdetails)
{
	int ret = 0;

	if (cur->start == 0)
		goto out;

	if (cur->start <= last->start)
		return 0;

	if (cur->dpagemap == last->dpagemap && cur->ops == last->ops)
		return 0;

	if (last->dpagemap)
		ret = drm_pagemap_migrate_remote_to_local(devmem,
							  last->device,
							  last->dpagemap,
							  &dst_pfns[last->start],
							  &pages[last->start],
							  &pagemap_addr[last->start],
							  cur->start - last->start,
							  last->ops, mdetails);
	else
		ret = drm_pagemap_migrate_sys_to_dev(devmem,
						     &src_pfns[last->start],
						     &pages[last->start],
						     &pagemap_addr[last->start],
						     cur->start - last->start,
						     last->ops, mdetails);

out:
	*last = *cur;
	return ret;
}
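
/*
 * Illustrative only: a minimal shape for the
 * &drm_pagemap_devmem_ops.copy_to_devmem callback invoked by
 * drm_pagemap_migrate_sys_to_dev() above. The my_ helpers are hypothetical;
 * the contract assumed here is that @pages are the destination device pages
 * and @pagemap_addr describes the already-mapped source, with entries
 * possibly covering higher-order folios.
 *
 *	static int my_copy_to_devmem(struct page **pages,
 *				     struct drm_pagemap_addr *pagemap_addr,
 *				     unsigned long npages,
 *				     struct dma_fence *pre_migrate_fence)
 *	{
 *		unsigned long i;
 *
 *		if (pre_migrate_fence)
 *			dma_fence_wait(pre_migrate_fence, false);
 *
 *		for (i = 0; i < npages; i += 1UL << pagemap_addr[i].order) {
 *			if (!pages[i] || !pagemap_addr[i].addr)
 *				continue;	// hole, nothing to copy
 *
 *			my_blit(my_page_to_vram_offset(pages[i]),
 *				pagemap_addr[i],
 *				PAGE_SIZE << pagemap_addr[i].order);
 *		}
 *
 *		return my_blit_flush();
 *	}
 */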

/**
 * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
 * @devmem_allocation: The device memory allocation to migrate to.
 * The caller should hold a reference to the device memory allocation,
 * and the reference is consumed by this function even if it returns with
 * an error.
 * @mm: Pointer to the struct mm_struct.
 * @start: Start of the virtual address range to migrate.
 * @end: End of the virtual address range to migrate.
 * @mdetails: Details to govern the migration.
 *
 * This function migrates the specified virtual address range to device memory.
 * It performs the necessary setup and invokes the driver-specific operations for
 * migration to device memory. Expected to be called while holding the mmap lock in
 * at least read mode.
 *
 * Note: The timeslice_ms field of @mdetails can typically be used to force data to
 * remain in pagemap pages long enough for a GPU to perform a task and to prevent
 * a migration livelock. One alternative would be for the GPU driver to block
 * in a mmu_notifier for the specified amount of time, but adding the
 * functionality to the pagemap is likely nicer to the system as a whole.
 *
 * Return: %0 on success, negative error code on failure.
 */
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  const struct drm_pagemap_migrate_details *mdetails)
{
	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
	struct drm_pagemap *dpagemap = devmem_allocation->dpagemap;
	struct dev_pagemap *pagemap = dpagemap->pagemap;
	struct migrate_vma migrate = {
		.start = start,
		.end = end,
		.pgmap_owner = pagemap->owner,
		.flags = MIGRATE_VMA_SELECT_SYSTEM | MIGRATE_VMA_SELECT_DEVICE_COHERENT |
			MIGRATE_VMA_SELECT_DEVICE_PRIVATE,
	};
	unsigned long i, npages = npages_in_range(start, end);
	unsigned long own_pages = 0, migrated_pages = 0;
	struct migrate_range_loc cur, last = {.device = dpagemap->drm->dev, .ops = ops};
	struct vm_area_struct *vas;
	struct drm_pagemap_zdd *zdd = NULL;
	struct page **pages;
	struct drm_pagemap_addr *pagemap_addr;
	void *buf;
	int err;

	mmap_assert_locked(mm);

	if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
	    !ops->copy_to_ram)
		return -EOPNOTSUPP;

	vas = vma_lookup(mm, start);
	if (!vas) {
		err = -ENOENT;
		goto err_out;
	}

	if (end > vas->vm_end || start < vas->vm_start) {
		err = -EINVAL;
		goto err_out;
	}

	if (!vma_is_anonymous(vas)) {
		err = -EBUSY;
		goto err_out;
	}

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;

	zdd = drm_pagemap_zdd_alloc(dpagemap);
	if (!zdd) {
		err = -ENOMEM;
		kvfree(buf);
		goto err_out;
	}
	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	if (!migrate.cpages) {
		/* No pages to migrate. Raced or unknown device pages. */
		err = -EBUSY;
		goto err_free;
	}

	if (migrate.cpages != npages) {
		/*
		 * Some pages to migrate. But we want to migrate all or
		 * nothing. Raced or unknown device pages.
		 */
		err = -EBUSY;
		goto err_aborted_migration;
	}

	/* Count device-private pages to migrate */
	for (i = 0; i < npages;) {
		struct page *src_page = migrate_pfn_to_page(migrate.src[i]);
		unsigned long nr_pages = src_page ?
			NR_PAGES(folio_order(page_folio(src_page))) : 1;

		if (src_page && is_zone_device_page(src_page)) {
			if (page_pgmap(src_page) == pagemap)
				own_pages += nr_pages;
		}

		i += nr_pages;
	}

	drm_dbg(dpagemap->drm, "Total pages %lu; Own pages: %lu.\n",
		npages, own_pages);
	if (own_pages == npages) {
		err = 0;
		drm_dbg(dpagemap->drm, "Migration wasn't necessary.\n");
		goto err_aborted_migration;
	} else if (own_pages && !mdetails->can_migrate_same_pagemap) {
		err = -EBUSY;
		drm_dbg(dpagemap->drm, "Migration aborted due to fragmentation.\n");
		goto err_aborted_migration;
	}

	err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
	if (err)
		goto err_aborted_migration;

	own_pages = 0;

	for (i = 0; i < npages; ++i) {
		struct page *page = pfn_to_page(migrate.dst[i]);
		struct page *src_page = migrate_pfn_to_page(migrate.src[i]);

		cur.start = i;
		pages[i] = NULL;
		if (src_page && is_device_private_page(src_page)) {
			struct drm_pagemap_zdd *src_zdd = src_page->zone_device_data;

			if (page_pgmap(src_page) == pagemap &&
			    !mdetails->can_migrate_same_pagemap) {
				migrate.dst[i] = 0;
				own_pages++;
				continue;
			}
			if (mdetails->source_peer_migrates) {
				cur.dpagemap = src_zdd->dpagemap;
				cur.ops = src_zdd->devmem_allocation->ops;
				cur.device = cur.dpagemap->drm->dev;
				pages[i] = src_page;
			}
		}
		if (!pages[i]) {
			cur.dpagemap = NULL;
			cur.ops = ops;
			cur.device = dpagemap->drm->dev;
			pages[i] = page;
		}
		migrate.dst[i] = migrate_pfn(migrate.dst[i]);
		drm_pagemap_get_devmem_page(page, zdd);

		/* If we switched the migrating drm_pagemap, migrate previous pages now */
		err = drm_pagemap_migrate_range(devmem_allocation, migrate.src, migrate.dst,
						pages, pagemap_addr, &last, &cur,
						mdetails);
		if (err) {
			npages = i + 1;
			goto err_finalize;
		}
	}
	cur.start = npages;
	cur.ops = NULL;	/* Force migration */
	err = drm_pagemap_migrate_range(devmem_allocation, migrate.src, migrate.dst,
					pages, pagemap_addr, &last, &cur, mdetails);
	if (err)
		goto err_finalize;

	drm_WARN_ON(dpagemap->drm, !!own_pages);

	dma_fence_put(devmem_allocation->pre_migrate_fence);
	devmem_allocation->pre_migrate_fence = NULL;

	/* Upon success bind devmem allocation to range and zdd */
	devmem_allocation->timeslice_expiration = get_jiffies_64() +
		msecs_to_jiffies(mdetails->timeslice_ms);

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
err_aborted_migration:
	migrate_vma_pages(&migrate);

	for (i = 0; !err && i < npages;) {
		struct page *page = migrate_pfn_to_page(migrate.src[i]);
		unsigned long nr_pages = page ?
			NR_PAGES(folio_order(page_folio(page))) : 1;

		if (migrate.src[i] & MIGRATE_PFN_MIGRATE)
			migrated_pages += nr_pages;

		i += nr_pages;
	}

	if (!err && migrated_pages < npages - own_pages) {
		drm_dbg(dpagemap->drm, "Raced while finalizing migration.\n");
		err = -EBUSY;
	}

	migrate_vma_finalize(&migrate);
err_free:
	drm_pagemap_zdd_put(zdd);
	kvfree(buf);
	return err;

err_out:
	devmem_allocation->ops->devmem_release(devmem_allocation);
	return err;
}
EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
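
/*
 * Illustrative only: the migration above requires the devmem allocation to
 * implement populate_devmem_pfn(), copy_to_devmem() and copy_to_ram(). A
 * hypothetical populate_devmem_pfn() simply reports the pfns backing the
 * driver's device memory buffer (my_devmem and its fields are made up):
 *
 *	static int my_populate_devmem_pfn(struct drm_pagemap_devmem *devmem_allocation,
 *					  unsigned long npages, unsigned long *pfn)
 *	{
 *		struct my_devmem *bo = container_of(devmem_allocation,
 *						    struct my_devmem, base);
 *		unsigned long i;
 *
 *		if (npages > bo->nr_pages)
 *			return -EINVAL;
 *
 *		for (i = 0; i < npages; i++)
 *			pfn[i] = bo->base_pfn + i;
 *
 *		return 0;
 *	}
 */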

/**
 * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
 * @vas: Pointer to the VM area structure, can be NULL
 * @fault_page: Fault page
 * @npages: Number of pages to populate
 * @mpages: Number of pages to migrate
 * @src_mpfn: Source array of migrate PFNs
 * @mpfn: Array of migrate PFNs to populate
 * @addr: Start address for PFN allocation
 *
 * This function populates the RAM migrate page frame numbers (PFNs) for the
 * specified VM area structure. It allocates and locks pages in the VM area for
 * RAM usage. If @vas is non-NULL, vma_alloc_folio() is used for allocation;
 * if NULL, folio_alloc() is used.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
						struct page *fault_page,
						unsigned long npages,
						unsigned long *mpages,
						unsigned long *src_mpfn,
						unsigned long *mpfn,
						unsigned long addr)
{
	unsigned long i;

	for (i = 0; i < npages;) {
		struct page *page = NULL, *src_page;
		struct folio *folio;
		unsigned int order = 0;

		if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
			goto next;

		src_page = migrate_pfn_to_page(src_mpfn[i]);
		if (!src_page)
			goto next;

		if (fault_page) {
			if (src_page->zone_device_data !=
			    fault_page->zone_device_data)
				goto next;
		}

		order = folio_order(page_folio(src_page));

		/* TODO: Support fallback to single pages if THP allocation fails */
		if (vas)
			folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
		else
			folio = folio_alloc(GFP_HIGHUSER, order);

		if (!folio)
			goto free_pages;

		page = folio_page(folio, 0);
		mpfn[i] = migrate_pfn(page_to_pfn(page));

next:
		if (page)
			addr += page_size(page);
		else
			addr += PAGE_SIZE;

		i += NR_PAGES(order);
	}

	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);
		unsigned int order = 0;

		if (!page)
			goto next_lock;

		WARN_ON_ONCE(!folio_trylock(page_folio(page)));

		order = folio_order(page_folio(page));
		*mpages += NR_PAGES(order);

next_lock:
		i += NR_PAGES(order);
	}

	return 0;

free_pages:
	for (i = 0; i < npages;) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);
		unsigned int order = 0;

		if (!page)
			goto next_put;

		put_page(page);
		mpfn[i] = 0;

		order = folio_order(page_folio(page));

next_put:
		i += NR_PAGES(order);
	}
	return -ENOMEM;
}

static void drm_pagemap_dev_unhold_work(struct work_struct *work);
static LLIST_HEAD(drm_pagemap_unhold_list);
static DECLARE_WORK(drm_pagemap_work, drm_pagemap_dev_unhold_work);

/**
 * struct drm_pagemap_dev_hold - Struct to aid in drm_device release.
 * @link: Link into drm_pagemap_unhold_list for deferred reference releases.
 * @drm: drm device to put.
 *
 * When a struct drm_pagemap is released, we also need to release the
 * reference it holds on the drm device. However, that typically needs
 * to be done separately, from a system-wide workqueue.
 * Each time a struct drm_pagemap is initialized
 * (or re-initialized if cached), we therefore allocate a separate
 * drm_pagemap_dev_hold item, from which we put the drm device and
 * associated module.
 */
struct drm_pagemap_dev_hold {
	struct llist_node link;
	struct drm_device *drm;
};

static void drm_pagemap_release(struct kref *ref)
{
	struct drm_pagemap *dpagemap = container_of(ref, typeof(*dpagemap), ref);
	struct drm_pagemap_dev_hold *dev_hold = dpagemap->dev_hold;

	/*
	 * We know the pagemap provider is alive at this point, since
	 * the struct drm_pagemap_dev_hold holds a reference to the
	 * pagemap provider drm_device and its module.
	 */
	dpagemap->dev_hold = NULL;
	drm_pagemap_shrinker_add(dpagemap);
	llist_add(&dev_hold->link, &drm_pagemap_unhold_list);
	schedule_work(&drm_pagemap_work);
	/*
	 * Here, either the provider device is still alive, since if called from
	 * page_free(), the caller is holding a reference on the dev_pagemap,
	 * or if called from drm_pagemap_put(), the direct caller is still alive.
	 * This ensures we can't race with THIS module unload.
	 */
}

static void drm_pagemap_dev_unhold_work(struct work_struct *work)
{
	struct llist_node *node = llist_del_all(&drm_pagemap_unhold_list);
	struct drm_pagemap_dev_hold *dev_hold, *next;

	/*
	 * Deferred release of drm_pagemap provider device and module.
	 * THIS module is kept alive during the release by the
	 * flush_work() in the drm_pagemap_exit() function.
	 */
	llist_for_each_entry_safe(dev_hold, next, node, link) {
		struct drm_device *drm = dev_hold->drm;
		struct module *module = drm->driver->fops->owner;

		drm_dbg(drm, "Releasing reference on provider device and module.\n");
		drm_dev_put(drm);
		module_put(module);
		kfree(dev_hold);
	}
}

static struct drm_pagemap_dev_hold *
drm_pagemap_dev_hold(struct drm_pagemap *dpagemap)
{
	struct drm_pagemap_dev_hold *dev_hold;
	struct drm_device *drm = dpagemap->drm;

	dev_hold = kzalloc(sizeof(*dev_hold), GFP_KERNEL);
	if (!dev_hold)
		return ERR_PTR(-ENOMEM);

	init_llist_node(&dev_hold->link);
	dev_hold->drm = drm;
	(void)try_module_get(drm->driver->fops->owner);
	drm_dev_get(drm);

	return dev_hold;
}

/**
 * drm_pagemap_reinit() - Reinitialize a drm_pagemap
 * @dpagemap: The drm_pagemap to reinitialize
 *
 * Reinitialize a drm_pagemap, for which drm_pagemap_release
 * has already been called. This interface is intended for the
 * situation where the driver caches a destroyed drm_pagemap.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_pagemap_reinit(struct drm_pagemap *dpagemap)
{
	dpagemap->dev_hold = drm_pagemap_dev_hold(dpagemap);
	if (IS_ERR(dpagemap->dev_hold))
		return PTR_ERR(dpagemap->dev_hold);

	kref_init(&dpagemap->ref);
	return 0;
}
EXPORT_SYMBOL(drm_pagemap_reinit);

/**
 * drm_pagemap_init() - Initialize a pre-allocated drm_pagemap
 * @dpagemap: The drm_pagemap to initialize.
 * @pagemap: The associated dev_pagemap providing the device
 * private pages.
 * @drm: The drm device. The drm_pagemap holds a reference on the
 * drm_device and the module owning the drm_device until
 * drm_pagemap_release(). This facilitates drm_pagemap exporting.
 * @ops: The drm_pagemap ops.
 *
 * Initialize and take an initial reference on a drm_pagemap.
 * After successful return, use drm_pagemap_put() to destroy.
 *
 * Return: 0 on success, negative error code on error.
 */
int drm_pagemap_init(struct drm_pagemap *dpagemap,
		     struct dev_pagemap *pagemap,
		     struct drm_device *drm,
		     const struct drm_pagemap_ops *ops)
{
	kref_init(&dpagemap->ref);
	dpagemap->ops = ops;
	dpagemap->pagemap = pagemap;
	dpagemap->drm = drm;
	dpagemap->cache = NULL;
	INIT_LIST_HEAD(&dpagemap->shrink_link);

	return drm_pagemap_reinit(dpagemap);
}
EXPORT_SYMBOL(drm_pagemap_init);

/**
 * drm_pagemap_put() - Put a struct drm_pagemap reference
 * @dpagemap: Pointer to a struct drm_pagemap object.
 *
 * Puts a struct drm_pagemap reference and frees the drm_pagemap object
 * if the refcount reaches zero.
 */
void drm_pagemap_put(struct drm_pagemap *dpagemap)
{
	if (likely(dpagemap)) {
		drm_pagemap_shrinker_might_lock(dpagemap);
		kref_put(&dpagemap->ref, drm_pagemap_release);
	}
}
EXPORT_SYMBOL(drm_pagemap_put);
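
/*
 * Illustrative only: a rough order of operations for a provider driver
 * setting up a drm_pagemap on top of a device-private dev_pagemap. The my_
 * structures, the resource setup and the ops table contents are hypothetical.
 *
 *	my_vr->pagemap.type = MEMORY_DEVICE_PRIVATE;
 *	my_vr->pagemap.range.start = res->start;
 *	my_vr->pagemap.range.end = res->end;
 *	my_vr->pagemap.nr_range = 1;
 *	my_vr->pagemap.ops = drm_pagemap_pagemap_ops_get();
 *	my_vr->pagemap.owner = my_owner;
 *
 *	err = drm_pagemap_init(&my_vr->dpagemap, &my_vr->pagemap,
 *			       &my_dev->drm, &my_pagemap_ops);
 *	if (err)
 *		goto err_out;
 *
 *	addr = devm_memremap_pages(my_dev->drm.dev, &my_vr->pagemap);
 */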

/**
 * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
 * @devmem_allocation: Pointer to the device memory allocation
 *
 * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap
 * lock, and migration is done via the migrate_device_* functions.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
{
	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
	struct drm_pagemap_migrate_details mdetails = {};
	unsigned long npages, mpages = 0;
	struct page **pages;
	unsigned long *src, *dst;
	struct drm_pagemap_addr *pagemap_addr;
	void *buf;
	int i, err = 0;
	unsigned int retry_count = 2;

	npages = devmem_allocation->size >> PAGE_SHIFT;

retry:
	if (!mmget_not_zero(devmem_allocation->mm))
		return -EFAULT;

	buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	src = buf;
	dst = buf + (sizeof(*src) * npages);
	pagemap_addr = buf + (2 * sizeof(*src) * npages);
	pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;

	err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
	if (err)
		goto err_free;

	err = migrate_device_pfns(src, npages);
	if (err)
		goto err_free;

	err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
						   src, dst, 0);
	if (err || !mpages)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev,
					    devmem_allocation->dpagemap, pagemap_addr,
					    dst, npages, DMA_FROM_DEVICE,
					    &mdetails);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i)
		pages[i] = migrate_pfn_to_page(src[i]);

	err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
	if (err)
		goto err_finalize;

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, dst);
	migrate_device_pages(src, dst, npages);
	migrate_device_finalize(src, dst, npages);
	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, dst, npages,
					DMA_FROM_DEVICE);

err_free:
	kvfree(buf);
err_out:
	mmput_async(devmem_allocation->mm);

	if (completion_done(&devmem_allocation->detached))
		return 0;

	if (retry_count--) {
		cond_resched();
		goto retry;
	}

	return err ?: -EBUSY;
}
EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
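
/*
 * Illustrative only: drivers typically call drm_pagemap_evict_to_ram() when
 * device memory must be reclaimed (shrinker / eviction pressure) or before
 * the device is unbound. A hypothetical eviction helper (my_devmem is made
 * up, embedding a struct drm_pagemap_devmem as its base member):
 *
 *	static int my_evict_devmem(struct my_devmem *bo)
 *	{
 *		int err;
 *
 *		err = drm_pagemap_evict_to_ram(&bo->base);
 *		if (err)
 *			return err;
 *
 *		// All data is now backed by system RAM and bo->base.detached
 *		// has completed, so the device memory can be freed.
 *		return my_free_vram(bo);
 *	}
 */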

/**
 * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
 * @vas: Pointer to the VM area structure
 * @page: Pointer to the page for fault handling.
 * @fault_addr: Fault address
 * @size: Size of migration
 *
 * This internal function performs the migration of the specified GPU SVM range
 * to RAM. It sets up the migration, populates and dma-maps the RAM PFNs, and
 * invokes the driver-specific operations for migration to RAM.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
					struct page *page,
					unsigned long fault_addr,
					unsigned long size)
{
	struct migrate_vma migrate = {
		.vma = vas,
		.pgmap_owner = page_pgmap(page)->owner,
		.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
			MIGRATE_VMA_SELECT_DEVICE_COHERENT,
		.fault_page = page,
	};
	struct drm_pagemap_migrate_details mdetails = {};
	struct drm_pagemap_zdd *zdd;
	const struct drm_pagemap_devmem_ops *ops;
	struct device *dev = NULL;
	unsigned long npages, mpages = 0;
	struct page **pages;
	struct drm_pagemap_addr *pagemap_addr;
	unsigned long start, end;
	void *buf;
	int i, err = 0;

	zdd = page->zone_device_data;
	if (time_before64(get_jiffies_64(), zdd->devmem_allocation->timeslice_expiration))
		return 0;

	start = ALIGN_DOWN(fault_addr, size);
	end = ALIGN(fault_addr + 1, size);

	/* Corner case where the VMA has been partially unmapped */
	if (start < vas->vm_start)
		start = vas->vm_start;
	if (end > vas->vm_end)
		end = vas->vm_end;

	migrate.start = start;
	migrate.end = end;
	npages = npages_in_range(start, end);

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	/* Raced with another CPU fault, nothing to do */
	if (!migrate.cpages)
		goto err_free;

	ops = zdd->devmem_allocation->ops;
	dev = zdd->devmem_allocation->dev;

	err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
						   migrate.src, migrate.dst,
						   start);
	if (err)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(dev, zdd->dpagemap, pagemap_addr, migrate.dst, npages,
					    DMA_FROM_DEVICE, &mdetails);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i)
		pages[i] = migrate_pfn_to_page(migrate.src[i]);

	err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
	if (err)
		goto err_finalize;

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
	migrate_vma_pages(&migrate);
	migrate_vma_finalize(&migrate);
	if (dev)
		drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, migrate.dst,
						npages, DMA_FROM_DEVICE);
err_free:
	kvfree(buf);
err_out:

	return err;
}

/**
 * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio
 * @folio: Pointer to the folio
 *
 * This function is a callback used to put the GPU SVM zone device data
 * associated with a folio when it is being released.
 */
static void drm_pagemap_folio_free(struct folio *folio)
{
	drm_pagemap_zdd_put(folio->page.zone_device_data);
}

/**
 * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
 * @vmf: Pointer to the fault information structure
 *
 * This function is a page fault handler used to migrate a virtual range
 * to RAM. The device memory allocation in which the device page is found is
 * migrated in its entirety.
 *
 * Returns:
 * VM_FAULT_SIGBUS on failure, 0 on success.
 */
static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
{
	struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
	int err;

	err = __drm_pagemap_migrate_to_ram(vmf->vma,
					   vmf->page, vmf->address,
					   zdd->devmem_allocation->size);

	return err ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
	.folio_free = drm_pagemap_folio_free,
	.migrate_to_ram = drm_pagemap_migrate_to_ram,
};

/**
 * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
 *
 * Returns:
 * Pointer to the GPU SVM device page map operations structure.
 */
const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
{
	return &drm_pagemap_pagemap_ops;
}
EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);

/**
 * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
 *
 * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
 * @dev: Pointer to the device structure to which the device memory allocation belongs
 * @mm: Pointer to the mm_struct for the address space
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap we're allocating from.
 * @size: Size of device memory allocation
 * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
 * (May be NULL).
 */
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size,
			     struct dma_fence *pre_migrate_fence)
{
	init_completion(&devmem_allocation->detached);
	devmem_allocation->dev = dev;
	devmem_allocation->mm = mm;
	devmem_allocation->ops = ops;
	devmem_allocation->dpagemap = dpagemap;
	devmem_allocation->size = size;
	devmem_allocation->pre_migrate_fence = pre_migrate_fence;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);

/**
 * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page
 * @page: The struct page.
 *
 * Return: A pointer to the struct drm_pagemap of a device private page that
 * was populated from the struct drm_pagemap. If the page was *not* populated
 * from a struct drm_pagemap, the result is undefined and the function call
 * may result in dereferencing an invalid address.
 */
struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
	struct drm_pagemap_zdd *zdd = page->zone_device_data;

	return zdd->devmem_allocation->dpagemap;
}
EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
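
/*
 * Illustrative only: a hypothetical driver allocation path tying the pieces
 * above together before calling drm_pagemap_migrate_to_devmem(). All my_
 * names are made up.
 *
 *	bo = my_alloc_vram(vram, size);
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	drm_pagemap_devmem_init(&bo->base, vram->drm->dev, mm,
 *				&my_devmem_ops, &vram->dpagemap, size,
 *				my_last_clear_fence(bo));
 *
 *	err = drm_pagemap_migrate_to_devmem(&bo->base, mm, start, end, &mdetails);
 */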

/**
 * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
 * @dpagemap: Pointer to the drm_pagemap managing the device memory
 * @start: Start of the virtual range to populate.
 * @end: End of the virtual range to populate.
 * @mm: Pointer to the virtual address space.
 * @timeslice_ms: The time requested for the migrated pagemap pages to
 * be present in @mm before being allowed to be migrated back.
 *
 * Attempt to populate a virtual range with device memory pages,
 * clearing them or migrating data from the existing pages if necessary.
 * The function is best effort only, and implementations may vary
 * in how hard they try to satisfy the request.
 *
 * Return: %0 on success, negative error code on error. If the hardware
 * device was removed / unbound the function will return %-ENODEV.
 */
int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
			    unsigned long start, unsigned long end,
			    struct mm_struct *mm,
			    unsigned long timeslice_ms)
{
	int err;

	if (!mmget_not_zero(mm))
		return -EFAULT;
	mmap_read_lock(mm);
	err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
					 timeslice_ms);
	mmap_read_unlock(mm);
	mmput(mm);

	return err;
}
EXPORT_SYMBOL(drm_pagemap_populate_mm);

void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim)
{
	if (dpagemap->ops->destroy)
		dpagemap->ops->destroy(dpagemap, is_atomic_or_reclaim);
	else
		kfree(dpagemap);
}

static void drm_pagemap_exit(void)
{
	flush_work(&drm_pagemap_work);
	if (WARN_ON(!llist_empty(&drm_pagemap_unhold_list)))
		disable_work_sync(&drm_pagemap_work);
}
module_exit(drm_pagemap_exit);