// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright © 2024-2025 Intel Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <drm/drm_pagemap.h>

/**
 * DOC: Overview
 *
 * The DRM pagemap layer is intended to augment the dev_pagemap functionality by
 * providing a way to populate a struct mm_struct virtual range with device
 * private pages and to provide helpers to abstract device memory allocations,
 * to migrate memory back and forth between device memory and system RAM and
 * to handle access (and in the future migration) between devices implementing
 * a fast interconnect that is not necessarily visible to the rest of the
 * system.
 *
 * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
 * instances to populate struct mm_struct virtual ranges with memory.
 */

/**
 * DOC: Migration
 *
 * The migration support is quite simple, allowing migration between RAM and
 * device memory at the range granularity. For example, GPU SVM currently does
 * not support mixing RAM and device memory pages within a range. This means
 * that upon GPU fault, the entire range can be migrated to device memory, and
 * upon CPU fault, the entire range is migrated to RAM. Mixed RAM and device
 * memory storage within a range could be added in the future if required.
 *
 * The reasoning for only supporting range granularity is as follows: it
 * simplifies the implementation, and range sizes are driver-defined and should
 * be relatively small.
 *
 * Key DRM pagemap components:
 *
 * - Device Memory Allocations:
 *	Embedded structure containing enough information for the drm_pagemap to
 *	migrate to / from device memory.
 *
 * - Device Memory Operations:
 *	Define the interface for driver-specific device memory operations:
 *	release memory, populate pfns, and copy to / from device memory.
 */
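
/*
 * As an illustrative sketch only (not taken from any particular driver), the
 * driver-side pieces referred to above might be wired up roughly as follows.
 * The identifiers my_devmem_ops, my_alloc_devmem(), my_free_devmem(),
 * my_vram_owner and MY_TIMESLICE_MS are hypothetical; only the
 * drm_pagemap_devmem_ops callbacks and drm_pagemap_migrate_to_devmem() are
 * part of this layer.
 *
 *	static const struct drm_pagemap_devmem_ops my_devmem_ops = {
 *		.devmem_release = my_devmem_release,
 *		.populate_devmem_pfn = my_populate_devmem_pfn,
 *		.copy_to_devmem = my_copy_to_devmem,
 *		.copy_to_ram = my_copy_to_ram,
 *	};
 *
 *	static int my_handle_gpu_fault(struct mm_struct *mm,
 *				       unsigned long start, unsigned long end)
 *	{
 *		struct drm_pagemap_devmem *devmem;
 *		int err;
 *
 *		devmem = my_alloc_devmem(end - start); // calls drm_pagemap_devmem_init()
 *		if (IS_ERR(devmem))
 *			return PTR_ERR(devmem);
 *
 *		mmap_read_lock(mm);
 *		err = drm_pagemap_migrate_to_devmem(devmem, mm, start, end,
 *						    MY_TIMESLICE_MS, my_vram_owner);
 *		mmap_read_unlock(mm);
 *		if (err)
 *			my_free_devmem(devmem); // reference only consumed on success
 *		return err;
 *	}
 */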

/**
 * struct drm_pagemap_zdd - GPU SVM zone device data
 *
 * @refcount: Reference count for the zdd
 * @devmem_allocation: device memory allocation
 * @device_private_page_owner: Device private pages owner
 *
 * This structure serves as a generic wrapper installed in
 * page->zone_device_data. It provides infrastructure for looking up a device
 * memory allocation upon CPU page fault and asynchronously releasing device
 * memory once the CPU has no page references. Asynchronous release is useful
 * because CPU page references can be dropped in IRQ contexts, while releasing
 * device memory likely requires sleeping locks.
 */
struct drm_pagemap_zdd {
	struct kref refcount;
	struct drm_pagemap_devmem *devmem_allocation;
	void *device_private_page_owner;
};

/**
 * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
 * @device_private_page_owner: Device private pages owner
 *
 * This function allocates and initializes a new zdd structure. It sets up the
 * reference count and initializes its fields.
 *
 * Return: Pointer to the allocated zdd on success, NULL on failure.
 */
static struct drm_pagemap_zdd *
drm_pagemap_zdd_alloc(void *device_private_page_owner)
{
	struct drm_pagemap_zdd *zdd;

	zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
	if (!zdd)
		return NULL;

	kref_init(&zdd->refcount);
	zdd->devmem_allocation = NULL;
	zdd->device_private_page_owner = device_private_page_owner;

	return zdd;
}

/**
 * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
 * @zdd: Pointer to the zdd structure.
 *
 * This function increments the reference count of the provided zdd structure.
 *
 * Return: Pointer to the zdd structure.
 */
static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
{
	kref_get(&zdd->refcount);
	return zdd;
}

/**
 * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
 * @ref: Pointer to the reference count structure.
 *
 * This function releases the zdd's device memory allocation, if any, and frees
 * the zdd structure.
 */
static void drm_pagemap_zdd_destroy(struct kref *ref)
{
	struct drm_pagemap_zdd *zdd =
		container_of(ref, struct drm_pagemap_zdd, refcount);
	struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;

	if (devmem) {
		complete_all(&devmem->detached);
		if (devmem->ops->devmem_release)
			devmem->ops->devmem_release(devmem);
	}
	kfree(zdd);
}

/**
 * drm_pagemap_zdd_put() - Put a zdd reference.
 * @zdd: Pointer to the zdd structure.
 *
 * This function decrements the reference count of the provided zdd structure
 * and destroys it if the count drops to zero.
 */
static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
{
	kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
}

/**
 * drm_pagemap_migration_unlock_put_page() - Put a migration page
 * @page: Pointer to the page to put
 *
 * This function unlocks and puts a page.
 */
static void drm_pagemap_migration_unlock_put_page(struct page *page)
{
	unlock_page(page);
	put_page(page);
}

/**
 * drm_pagemap_migration_unlock_put_pages() - Put migration pages
 * @npages: Number of pages
 * @migrate_pfn: Array of migrate page frame numbers
 *
 * This function unlocks and puts an array of pages.
 */
static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
						   unsigned long *migrate_pfn)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		struct page *page;

		if (!migrate_pfn[i])
			continue;

		page = migrate_pfn_to_page(migrate_pfn[i]);
		drm_pagemap_migration_unlock_put_page(page);
		migrate_pfn[i] = 0;
	}
}

/**
 * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
 * @page: Pointer to the page
 * @zdd: Pointer to the GPU SVM zone device data
 *
 * This function associates the given page with the specified GPU SVM zone
 * device data and initializes it for zone device usage.
 */
static void drm_pagemap_get_devmem_page(struct page *page,
					struct drm_pagemap_zdd *zdd)
{
	page->zone_device_data = drm_pagemap_zdd_get(zdd);
	zone_device_page_init(page);
}

/**
 * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
 * @dev: The device for which the pages are being mapped
 * @dma_addr: Array to store DMA addresses corresponding to mapped pages
 * @migrate_pfn: Array of migrate page frame numbers to map
 * @npages: Number of pages to map
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function maps pages of memory for migration usage in GPU SVM. It
 * iterates over each page frame number provided in @migrate_pfn, maps the
 * corresponding page, and stores the DMA address in the provided @dma_addr
 * array.
 *
 * Returns: 0 on success, -EFAULT if an error occurs during mapping.
 */
static int drm_pagemap_migrate_map_pages(struct device *dev,
					 dma_addr_t *dma_addr,
					 unsigned long *migrate_pfn,
					 unsigned long npages,
					 enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);

		if (!page)
			continue;

		if (WARN_ON_ONCE(is_zone_device_page(page)))
			return -EFAULT;

		dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, dma_addr[i]))
			return -EFAULT;
	}

	return 0;
}

/**
 * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
 * @dev: The device for which the pages were mapped
 * @dma_addr: Array of DMA addresses corresponding to mapped pages
 * @npages: Number of pages to unmap
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function unmaps previously mapped pages of memory for GPU Shared Virtual
 * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
 * if it's valid and not already unmapped, and unmaps the corresponding page.
 */
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
					    dma_addr_t *dma_addr,
					    unsigned long npages,
					    enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
			continue;

		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
	}
}

static unsigned long
npages_in_range(unsigned long start, unsigned long end)
{
	return (end - start) >> PAGE_SHIFT;
}

/**
 * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
 * @devmem_allocation: The device memory allocation to migrate to.
 * The caller should hold a reference to the device memory allocation,
 * and the reference is consumed by this function unless it returns with
 * an error.
 * @mm: Pointer to the struct mm_struct.
 * @start: Start of the virtual address range to migrate.
 * @end: End of the virtual address range to migrate.
 * @timeslice_ms: The time requested for the migrated pagemap pages to
 * be present in @mm before being allowed to be migrated back.
 * @pgmap_owner: Not used currently, since only system memory is considered.
 *
 * This function migrates the specified virtual address range to device memory.
 * It performs the necessary setup and invokes the driver-specific operations for
 * migration to device memory.
 * Expected to be called while holding the mmap lock in at least read mode.
 *
 * Note: The @timeslice_ms parameter can typically be used to force data to
 * remain in pagemap pages long enough for a GPU to perform a task and to prevent
 * a migration livelock. One alternative would be for the GPU driver to block
 * in a mmu_notifier for the specified amount of time, but adding the
 * functionality to the pagemap is likely nicer to the system as a whole.
 *
 * Return: %0 on success, negative error code on failure.
 */
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  unsigned long timeslice_ms,
				  void *pgmap_owner)
{
	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
	struct migrate_vma migrate = {
		.start = start,
		.end = end,
		.pgmap_owner = pgmap_owner,
		.flags = MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i, npages = npages_in_range(start, end);
	struct vm_area_struct *vas;
	struct drm_pagemap_zdd *zdd = NULL;
	struct page **pages;
	dma_addr_t *dma_addr;
	void *buf;
	int err;

	mmap_assert_locked(mm);

	if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
	    !ops->copy_to_ram)
		return -EOPNOTSUPP;

	vas = vma_lookup(mm, start);
	if (!vas) {
		err = -ENOENT;
		goto err_out;
	}

	if (end > vas->vm_end || start < vas->vm_start) {
		err = -EINVAL;
		goto err_out;
	}

	if (!vma_is_anonymous(vas)) {
		err = -EBUSY;
		goto err_out;
	}

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;

	zdd = drm_pagemap_zdd_alloc(pgmap_owner);
	if (!zdd) {
		err = -ENOMEM;
		goto err_free;
	}

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	if (!migrate.cpages) {
		err = -EFAULT;
		goto err_free;
	}

	if (migrate.cpages != npages) {
		err = -EBUSY;
		goto err_finalize;
	}

	err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
	if (err)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
					    migrate.src, npages, DMA_TO_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i) {
		struct page *page = pfn_to_page(migrate.dst[i]);

		pages[i] = page;
		migrate.dst[i] = migrate_pfn(migrate.dst[i]);
		drm_pagemap_get_devmem_page(page, zdd);
	}

	err = ops->copy_to_devmem(pages, dma_addr, npages);
	if (err)
		goto err_finalize;

	/* Upon success bind devmem allocation to range and zdd */
	devmem_allocation->timeslice_expiration = get_jiffies_64() +
		msecs_to_jiffies(timeslice_ms);
	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
	migrate_vma_pages(&migrate);
	migrate_vma_finalize(&migrate);
	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
					DMA_TO_DEVICE);
err_free:
	if (zdd)
		drm_pagemap_zdd_put(zdd);
	kvfree(buf);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
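
/*
 * The ops->populate_devmem_pfn() callback used above fills the given array
 * with the PFNs of the device-private pages backing @devmem_allocation; the
 * callers in this file then convert them to migrate PFN entries. As a minimal,
 * hypothetical sketch (struct my_devmem and its base_pfn member are made up),
 * a driver backing the allocation with physically contiguous device memory
 * might implement it as:
 *
 *	static int my_populate_devmem_pfn(struct drm_pagemap_devmem *devmem,
 *					  unsigned long npages,
 *					  unsigned long *pfn)
 *	{
 *		struct my_devmem *mine =
 *			container_of(devmem, struct my_devmem, base);
 *		unsigned long i;
 *
 *		for (i = 0; i < npages; ++i)
 *			pfn[i] = mine->base_pfn + i;
 *
 *		return 0;
 *	}
 */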

/**
 * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
 * @vas: Pointer to the VM area structure, can be NULL
 * @fault_page: Fault page
 * @npages: Number of pages to populate
 * @mpages: Number of pages to migrate
 * @src_mpfn: Source array of migrate PFNs
 * @mpfn: Array of migrate PFNs to populate
 * @addr: Start address for PFN allocation
 *
 * This function populates the RAM migrate page frame numbers (PFNs) for the
 * specified VM area structure. It allocates and locks pages in the VM area for
 * RAM usage. If @vas is non-NULL, alloc_page_vma() is used for allocation;
 * otherwise alloc_page() is used.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
						struct page *fault_page,
						unsigned long npages,
						unsigned long *mpages,
						unsigned long *src_mpfn,
						unsigned long *mpfn,
						unsigned long addr)
{
	unsigned long i;

	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
		struct page *page, *src_page;

		if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
			continue;

		src_page = migrate_pfn_to_page(src_mpfn[i]);
		if (!src_page)
			continue;

		if (fault_page) {
			if (src_page->zone_device_data !=
			    fault_page->zone_device_data)
				continue;
		}

		if (vas)
			page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
		else
			page = alloc_page(GFP_HIGHUSER);

		if (!page)
			goto free_pages;

		mpfn[i] = migrate_pfn(page_to_pfn(page));
	}

	for (i = 0; i < npages; ++i) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);

		if (!page)
			continue;

		WARN_ON_ONCE(!trylock_page(page));
		++*mpages;
	}

	return 0;

free_pages:
	for (i = 0; i < npages; ++i) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);

		if (!page)
			continue;

		put_page(page);
		mpfn[i] = 0;
	}
	return -ENOMEM;
}

/**
 * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
 * @devmem_allocation: Pointer to the device memory allocation
 *
 * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap lock;
 * the migration is done via the migrate_device_* functions.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
{
	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
	unsigned long npages, mpages = 0;
	struct page **pages;
	unsigned long *src, *dst;
	dma_addr_t *dma_addr;
	void *buf;
	int i, err = 0;
	unsigned int retry_count = 2;

	npages = devmem_allocation->size >> PAGE_SHIFT;

retry:
	if (!mmget_not_zero(devmem_allocation->mm))
		return -EFAULT;

	buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	src = buf;
	dst = buf + (sizeof(*src) * npages);
	dma_addr = buf + (2 * sizeof(*src) * npages);
	pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;

	err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
	if (err)
		goto err_free;

	err = migrate_device_pfns(src, npages);
	if (err)
		goto err_free;

	err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
						   src, dst, 0);
	if (err || !mpages)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
					    dst, npages, DMA_FROM_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i)
		pages[i] = migrate_pfn_to_page(src[i]);

	err = ops->copy_to_ram(pages, dma_addr, npages);
	if (err)
		goto err_finalize;

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, dst);
	migrate_device_pages(src, dst, npages);
	migrate_device_finalize(src, dst, npages);
	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
					DMA_FROM_DEVICE);
err_free:
	kvfree(buf);
err_out:
	mmput_async(devmem_allocation->mm);

	if (completion_done(&devmem_allocation->detached))
		return 0;

	if (retry_count--) {
		cond_resched();
		goto retry;
	}

	return err ?: -EBUSY;
}
EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
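
/*
 * As an illustrative, hypothetical sketch (my_evict_vram(), struct my_devmem
 * and my_requeue_for_eviction() are made up), a driver eviction path could
 * call drm_pagemap_evict_to_ram() without holding the mmap lock and simply
 * retry later if the allocation could not be detached:
 *
 *	static int my_evict_vram(struct my_devmem *mine)
 *	{
 *		int err = drm_pagemap_evict_to_ram(&mine->base);
 *
 *		if (err == -EBUSY)
 *			my_requeue_for_eviction(mine); // try again later
 *		return err;
 *	}
 */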

/**
 * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
 * @vas: Pointer to the VM area structure
 * @device_private_page_owner: Device private pages owner
 * @page: Pointer to the page for fault handling (can be NULL)
 * @fault_addr: Fault address
 * @size: Size of migration
 *
 * This internal function performs the migration of the specified GPU SVM range
 * to RAM. It sets up the migration, populates and DMA-maps the RAM PFNs, and
 * invokes the driver-specific operations for migration to RAM.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
					void *device_private_page_owner,
					struct page *page,
					unsigned long fault_addr,
					unsigned long size)
{
	struct migrate_vma migrate = {
		.vma = vas,
		.pgmap_owner = device_private_page_owner,
		.flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
			 MIGRATE_VMA_SELECT_DEVICE_COHERENT,
		.fault_page = page,
	};
	struct drm_pagemap_zdd *zdd;
	const struct drm_pagemap_devmem_ops *ops;
	struct device *dev = NULL;
	unsigned long npages, mpages = 0;
	struct page **pages;
	dma_addr_t *dma_addr;
	unsigned long start, end;
	void *buf;
	int i, err = 0;

	if (page) {
		zdd = page->zone_device_data;
		if (time_before64(get_jiffies_64(),
				  zdd->devmem_allocation->timeslice_expiration))
			return 0;
	}

	start = ALIGN_DOWN(fault_addr, size);
	end = ALIGN(fault_addr + 1, size);

	/* Corner case where the VMA has been partially unmapped */
	if (start < vas->vm_start)
		start = vas->vm_start;
	if (end > vas->vm_end)
		end = vas->vm_end;

	migrate.start = start;
	migrate.end = end;
	npages = npages_in_range(start, end);

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	/* Raced with another CPU fault, nothing to do */
	if (!migrate.cpages)
		goto err_free;

	if (!page) {
		for (i = 0; i < npages; ++i) {
			if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
				continue;

			page = migrate_pfn_to_page(migrate.src[i]);
			break;
		}

		if (!page)
			goto err_finalize;
	}
	zdd = page->zone_device_data;
	ops = zdd->devmem_allocation->ops;
	dev = zdd->devmem_allocation->dev;

	err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
						   migrate.src, migrate.dst,
						   start);
	if (err)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
					    DMA_FROM_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i)
		pages[i] = migrate_pfn_to_page(migrate.src[i]);

	err = ops->copy_to_ram(pages, dma_addr, npages);
	if (err)
		goto err_finalize;

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
	migrate_vma_pages(&migrate);
	migrate_vma_finalize(&migrate);
	if (dev)
		drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
						DMA_FROM_DEVICE);
err_free:
	kvfree(buf);
err_out:

	return err;
}

/**
 * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
 * @page: Pointer to the page
 *
 * This function is a callback used to put the GPU SVM zone device data
 * associated with a page when it is being released.
 */
static void drm_pagemap_page_free(struct page *page)
{
	drm_pagemap_zdd_put(page->zone_device_data);
}

/**
 * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
 * @vmf: Pointer to the fault information structure
 *
 * This function is a page fault handler used to migrate a virtual range
 * to RAM. The device memory allocation in which the device page is found is
 * migrated in its entirety.
 *
 * Returns:
 * VM_FAULT_SIGBUS on failure, 0 on success.
 */
static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
{
	struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
	int err;

	err = __drm_pagemap_migrate_to_ram(vmf->vma,
					   zdd->device_private_page_owner,
					   vmf->page, vmf->address,
					   zdd->devmem_allocation->size);

	return err ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
	.page_free = drm_pagemap_page_free,
	.migrate_to_ram = drm_pagemap_migrate_to_ram,
};

/**
 * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
 *
 * Returns:
 * Pointer to the GPU SVM device page map operations structure.
 */
const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
{
	return &drm_pagemap_pagemap_ops;
}
EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);

/**
 * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
 *
 * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
 * @dev: Pointer to the device structure to which the device memory allocation belongs
 * @mm: Pointer to the mm_struct for the address space
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap we're allocating from.
 * @size: Size of device memory allocation
 */
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size)
{
	init_completion(&devmem_allocation->detached);
	devmem_allocation->dev = dev;
	devmem_allocation->mm = mm;
	devmem_allocation->ops = ops;
	devmem_allocation->dpagemap = dpagemap;
	devmem_allocation->size = size;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);

/**
 * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page
 * @page: The struct page.
 *
 * Return: A pointer to the struct drm_pagemap of a device private page that
 * was populated from the struct drm_pagemap. If the page was *not* populated
 * from a struct drm_pagemap, the result is undefined and the function call
 * may result in dereferencing an invalid address.
 */
struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
	struct drm_pagemap_zdd *zdd = page->zone_device_data;

	return zdd->devmem_allocation->dpagemap;
}
EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
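
/*
 * As an illustrative, hypothetical sketch of how the pieces above fit together
 * (struct my_devmem, my_pagemap, my_dpagemap, my_devmem_ops, my_vram_owner and
 * res are made up), a driver would typically register its device-private
 * pagemap with drm_pagemap_pagemap_ops_get() and initialize each allocation
 * with drm_pagemap_devmem_init():
 *
 *	my_pagemap.type = MEMORY_DEVICE_PRIVATE;
 *	my_pagemap.range.start = res->start;
 *	my_pagemap.range.end = res->end;
 *	my_pagemap.nr_range = 1;
 *	my_pagemap.ops = drm_pagemap_pagemap_ops_get();
 *	my_pagemap.owner = my_vram_owner;
 *	if (IS_ERR(devm_memremap_pages(dev, &my_pagemap)))
 *		goto err;
 *
 *	...
 *
 *	drm_pagemap_devmem_init(&mine->base, dev, mm, &my_devmem_ops,
 *				my_dpagemap, size);
 */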