// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright © 2024 Intel Corporation
 *
 * Authors:
 *     Matthew Brost <matthew.brost@intel.com>
 */

#include <linux/dma-mapping.h>
#include <linux/hmm.h>
#include <linux/memremap.h>
#include <linux/migrate.h>
#include <linux/mm_types.h>
#include <linux/pagemap.h>
#include <linux/slab.h>

#include <drm/drm_device.h>
#include <drm/drm_gpusvm.h>
#include <drm/drm_pagemap.h>
#include <drm/drm_print.h>

/**
 * DOC: Overview
 *
 * The GPU Shared Virtual Memory (GPU SVM) layer for the Direct Rendering
 * Manager (DRM) is a component of the DRM framework designed to manage shared
 * virtual memory between the CPU and GPU. It enables efficient data exchange
 * and processing for GPU-accelerated applications by allowing memory sharing
 * and synchronization between the CPU's and GPU's virtual address spaces.
 *
 * Key GPU SVM Components:
 *
 * - Notifiers:
 *	Used for tracking memory intervals and notifying the GPU of changes,
 *	notifiers are sized based on a GPU SVM initialization parameter, with a
 *	recommendation of 512M or larger. They maintain a Red-Black tree and a
 *	list of ranges that fall within the notifier interval. Notifiers are
 *	tracked within a GPU SVM Red-Black tree and list and are dynamically
 *	inserted or removed as ranges within the interval are created or
 *	destroyed.
 *
 * - Ranges:
 *	Represent memory ranges mapped in a DRM device and managed by GPU SVM.
 *	They are sized based on an array of chunk sizes, which is a GPU SVM
 *	initialization parameter, and the CPU address space. Upon GPU fault,
 *	the largest aligned chunk that fits within the faulting CPU address
 *	space is chosen for the range size. Ranges are expected to be
 *	dynamically allocated on GPU fault and removed on an MMU notifier UNMAP
 *	event. As mentioned above, ranges are tracked in a notifier's Red-Black
 *	tree.
 *
 * - Operations:
 *	Define the interface for driver-specific GPU SVM operations such as
 *	range allocation, notifier allocation, and invalidations.
 *
 * - Device Memory Allocations:
 *	Embedded structure containing enough information for GPU SVM to migrate
 *	to / from device memory.
 *
 * - Device Memory Operations:
 *	Define the interface for driver-specific device memory operations, such
 *	as releasing memory, populating pfns, and copying to / from device
 *	memory.
 *
 * This layer provides interfaces for allocating, mapping, migrating, and
 * releasing memory ranges between the CPU and GPU. It handles all core memory
 * management interactions (DMA mapping, HMM, and migration) and provides
 * driver-specific virtual functions (vfuncs). This infrastructure is sufficient
 * to build the expected driver components for an SVM implementation as detailed
 * below.
 *
 * Expected Driver Components:
 *
 * - GPU page fault handler:
 *	Used to create ranges and notifiers based on the fault address,
 *	optionally migrate the range to device memory, and create GPU bindings.
 *
 * - Garbage collector:
 *	Used to unmap and destroy GPU bindings for ranges. Ranges are expected
 *	to be added to the garbage collector upon an MMU_NOTIFY_UNMAP event in
 *	the notifier callback.
 *
 * - Notifier callback:
 *	Used to invalidate and DMA unmap GPU bindings for ranges.
 */

/**
 * DOC: Locking
 *
 * GPU SVM handles locking for core MM interactions, i.e., it locks/unlocks the
 * mmap lock as needed.
 *
 * GPU SVM introduces a global notifier lock, which safeguards the notifier's
 * range RB tree and list, as well as the range's DMA mappings and sequence
 * number. GPU SVM manages all necessary locking and unlocking operations,
 * except for the recheck of a range's pages being valid
 * (drm_gpusvm_range_pages_valid()) when the driver is committing GPU bindings.
 * This lock corresponds to the ``driver->update`` lock mentioned in
 * Documentation/mm/hmm.rst. Future revisions may transition from a GPU SVM
 * global lock to a per-notifier lock if finer-grained locking is deemed
 * necessary.
 *
 * In addition to the locking mentioned above, the driver should implement a
 * lock to safeguard core GPU SVM function calls that modify state, such as
 * drm_gpusvm_range_find_or_insert() and drm_gpusvm_range_remove(). This lock
 * is denoted as 'driver_svm_lock' in code examples. Finer grained driver side
 * locking should also be possible for concurrent GPU fault processing within a
 * single GPU SVM. The 'driver_svm_lock' can be registered via
 * drm_gpusvm_driver_set_lock() to add lockdep annotations to GPU SVM, as
 * sketched below.
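 *
 * A minimal sketch of the driver lock registration, assuming a mutex-based
 * 'driver_svm_lock' (struct driver_svm and its members are hypothetical):
 *
 * .. code-block:: c
 *
 *	struct driver_svm {
 *		struct drm_gpusvm gpusvm;
 *		struct mutex driver_svm_lock;
 *	};
 *
 *	static void driver_svm_lock_init(struct driver_svm *svm)
 *	{
 *		mutex_init(&svm->driver_svm_lock);
 *		// Register the lock so GPU SVM can assert it is held
 *		drm_gpusvm_driver_set_lock(&svm->gpusvm, &svm->driver_svm_lock);
 *	}
 */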

/**
 * DOC: Migration
 *
 * The migration support is quite simple, allowing migration between RAM and
 * device memory at the range granularity. In particular, GPU SVM currently
 * does not support mixing RAM and device memory pages within a range. This
 * means that upon GPU fault, the entire range can be migrated to device
 * memory, and upon CPU fault, the entire range is migrated to RAM. Mixed RAM
 * and device memory storage within a range could be added in the future if
 * required.
 *
 * The reasoning for only supporting range granularity is as follows: it
 * simplifies the implementation, and range sizes are driver-defined and should
 * be relatively small.
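 *
 * A minimal sketch of a driver migration policy, deciding purely on range
 * size (driver_migration_policy() as referenced in the examples below; the
 * SZ_64K threshold is an arbitrary assumption):
 *
 * .. code-block:: c
 *
 *	static bool driver_migration_policy(struct drm_gpusvm_range *range)
 *	{
 *		// Migrate to device memory only if the copy is worth it
 *		return drm_gpusvm_range_end(range) -
 *		       drm_gpusvm_range_start(range) >= SZ_64K;
 *	}
 */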

/**
 * DOC: Partial Unmapping of Ranges
 *
 * Partial unmapping of ranges (e.g., 1M out of 2M is unmapped by CPU resulting
 * in MMU_NOTIFY_UNMAP event) presents several challenges, with the main one
 * being that a subset of the range still has CPU and GPU mappings. If the
 * backing store for the range is in device memory, a subset of the backing
 * store has references. One option would be to split the range and device
 * memory backing store, but the implementation for this would be quite
 * complicated. Given that partial unmappings are rare and driver-defined range
 * sizes are relatively small, GPU SVM does not support splitting of ranges.
 *
 * With no support for range splitting, upon partial unmapping of a range, the
 * driver is expected to invalidate and destroy the entire range. If the range
 * has device memory as its backing, the driver is also expected to migrate any
 * remaining pages back to RAM.
 */

/**
 * DOC: Examples
 *
 * This section provides three examples of how to build the expected driver
 * components: the GPU page fault handler, the garbage collector, and the
 * notifier callback.
 *
 * The generic code provided does not include logic for complex migration
 * policies, optimized invalidations, fine-grained driver locking, or other
 * potentially required driver locking (e.g., DMA-resv locks).
 *
 * 1) GPU page fault handler
 *
 * .. code-block:: c
 *
 *	int driver_bind_range(struct drm_gpusvm *gpusvm, struct drm_gpusvm_range *range)
 *	{
 *		int err = 0;
 *
 *		driver_alloc_and_setup_memory_for_bind(gpusvm, range);
 *
 *		drm_gpusvm_notifier_lock(gpusvm);
 *		if (drm_gpusvm_range_pages_valid(range))
 *			driver_commit_bind(gpusvm, range);
 *		else
 *			err = -EAGAIN;
 *		drm_gpusvm_notifier_unlock(gpusvm);
 *
 *		return err;
 *	}
 *
 *	int driver_gpu_fault(struct drm_gpusvm *gpusvm, unsigned long fault_addr,
 *			     unsigned long gpuva_start, unsigned long gpuva_end)
 *	{
 *		struct drm_gpusvm_ctx ctx = {};
 *		struct drm_gpusvm_range *range;
 *		struct drm_gpusvm_devmem *devmem;
 *		struct mm_struct *mm = gpusvm->mm;
 *		int err;
 *
 *		driver_svm_lock();
 *	retry:
 *		// Always process UNMAPs first so view of GPU SVM ranges is current
 *		driver_garbage_collector(gpusvm);
 *
 *		range = drm_gpusvm_range_find_or_insert(gpusvm, fault_addr,
 *							gpuva_start, gpuva_end,
 *							&ctx);
 *		if (IS_ERR(range)) {
 *			err = PTR_ERR(range);
 *			goto unlock;
 *		}
 *
 *		if (driver_migration_policy(range)) {
 *			mmap_read_lock(mm);
 *			devmem = driver_alloc_devmem();
 *			err = drm_gpusvm_migrate_to_devmem(gpusvm, range,
 *							   devmem, &ctx);
 *			mmap_read_unlock(mm);
 *			if (err)	// CPU mappings may have changed
 *				goto retry;
 *		}
 *
 *		err = drm_gpusvm_range_get_pages(gpusvm, range, &ctx);
 *		if (err == -EOPNOTSUPP || err == -EFAULT || err == -EPERM) {	// CPU mappings changed
 *			if (err == -EOPNOTSUPP)
 *				drm_gpusvm_range_evict(gpusvm, range);
 *			goto retry;
 *		} else if (err) {
 *			goto unlock;
 *		}
 *
 *		err = driver_bind_range(gpusvm, range);
 *		if (err == -EAGAIN)	// CPU mappings changed
 *			goto retry;
 *
 *	unlock:
 *		driver_svm_unlock();
 *		return err;
 *	}
 *
 * 2) Garbage Collector
 *
 * .. code-block:: c
 *
 *	void __driver_garbage_collector(struct drm_gpusvm *gpusvm,
 *					struct drm_gpusvm_range *range)
 *	{
 *		assert_driver_svm_locked(gpusvm);
 *
 *		// Partial unmap, migrate any remaining device memory pages back to RAM
 *		if (range->flags.partial_unmap)
 *			drm_gpusvm_range_evict(gpusvm, range);
 *
 *		driver_unbind_range(range);
 *		drm_gpusvm_range_remove(gpusvm, range);
 *	}
 *
 *	void driver_garbage_collector(struct drm_gpusvm *gpusvm)
 *	{
 *		assert_driver_svm_locked(gpusvm);
 *
 *		for_each_range_in_garbage_collector(gpusvm, range)
 *			__driver_garbage_collector(gpusvm, range);
 *	}
 *
 * 3) Notifier callback
 *
 * .. code-block:: c
 *
 *	void driver_invalidation(struct drm_gpusvm *gpusvm,
 *				 struct drm_gpusvm_notifier *notifier,
 *				 const struct mmu_notifier_range *mmu_range)
 *	{
 *		struct drm_gpusvm_ctx ctx = { .in_notifier = true, };
 *		struct drm_gpusvm_range *range = NULL;
 *
 *		driver_invalidate_device_pages(gpusvm, mmu_range->start, mmu_range->end);
 *
 *		drm_gpusvm_for_each_range(range, notifier, mmu_range->start,
 *					  mmu_range->end) {
 *			drm_gpusvm_range_unmap_pages(gpusvm, range, &ctx);
 *
 *			if (mmu_range->event != MMU_NOTIFY_UNMAP)
 *				continue;
 *
 *			drm_gpusvm_range_set_unmapped(range, mmu_range);
 *			driver_garbage_collector_add(gpusvm, range);
 *		}
 *	}
 */

/**
 * npages_in_range() - Calculate the number of pages in a given range
 * @start: The start address of the range
 * @end: The end address of the range
 *
 * This function calculates the number of pages in a given memory range,
 * specified by the start and end addresses. It divides the difference
 * between the end and start addresses by the page size (PAGE_SIZE) to
 * determine the number of pages in the range.
 *
 * Return: The number of pages in the specified range.
 */
static unsigned long
npages_in_range(unsigned long start, unsigned long end)
{
	return (end - start) >> PAGE_SHIFT;
}

/**
 * struct drm_gpusvm_zdd - GPU SVM zone device data
 *
 * @refcount: Reference count for the zdd
 * @devmem_allocation: device memory allocation
 * @device_private_page_owner: Device private pages owner
 *
 * This structure serves as a generic wrapper installed in
 * page->zone_device_data. It provides infrastructure for looking up a device
 * memory allocation upon CPU page fault and asynchronously releasing device
 * memory once the CPU has no page references. Asynchronous release is useful
 * because CPU page references can be dropped in IRQ contexts, while releasing
 * device memory likely requires sleeping locks.
 */
struct drm_gpusvm_zdd {
	struct kref refcount;
	struct drm_gpusvm_devmem *devmem_allocation;
	void *device_private_page_owner;
};

/**
 * drm_gpusvm_zdd_alloc() - Allocate a zdd structure.
 * @device_private_page_owner: Device private pages owner
 *
 * This function allocates and initializes a new zdd structure. It sets up the
 * reference count and initializes the structure's fields.
 *
 * Return: Pointer to the allocated zdd on success, NULL on failure.
 */
static struct drm_gpusvm_zdd *
drm_gpusvm_zdd_alloc(void *device_private_page_owner)
{
	struct drm_gpusvm_zdd *zdd;

	zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
	if (!zdd)
		return NULL;

	kref_init(&zdd->refcount);
	zdd->devmem_allocation = NULL;
	zdd->device_private_page_owner = device_private_page_owner;

	return zdd;
}

/**
 * drm_gpusvm_zdd_get() - Get a reference to a zdd structure.
 * @zdd: Pointer to the zdd structure.
 *
 * This function increments the reference count of the provided zdd structure.
 *
 * Return: Pointer to the zdd structure.
 */
static struct drm_gpusvm_zdd *drm_gpusvm_zdd_get(struct drm_gpusvm_zdd *zdd)
{
	kref_get(&zdd->refcount);
	return zdd;
}

/**
 * drm_gpusvm_zdd_destroy() - Destroy a zdd structure.
 * @ref: Pointer to the reference count structure.
 *
 * This function releases the device memory allocation, if any, and frees the
 * zdd structure once its reference count drops to zero.
 */
static void drm_gpusvm_zdd_destroy(struct kref *ref)
{
	struct drm_gpusvm_zdd *zdd =
		container_of(ref, struct drm_gpusvm_zdd, refcount);
	struct drm_gpusvm_devmem *devmem = zdd->devmem_allocation;

	if (devmem) {
		complete_all(&devmem->detached);
		if (devmem->ops->devmem_release)
			devmem->ops->devmem_release(devmem);
	}
	kfree(zdd);
}

/**
 * drm_gpusvm_zdd_put() - Put a zdd reference.
 * @zdd: Pointer to the zdd structure.
 *
 * This function decrements the reference count of the provided zdd structure
 * and destroys it if the count drops to zero.
 */
static void drm_gpusvm_zdd_put(struct drm_gpusvm_zdd *zdd)
{
	kref_put(&zdd->refcount, drm_gpusvm_zdd_destroy);
}

/**
 * drm_gpusvm_range_find() - Find GPU SVM range from GPU SVM notifier
 * @notifier: Pointer to the GPU SVM notifier structure.
 * @start: Start address of the range
 * @end: End address of the range
 *
 * Return: A pointer to the drm_gpusvm_range if found or NULL
 */
struct drm_gpusvm_range *
drm_gpusvm_range_find(struct drm_gpusvm_notifier *notifier, unsigned long start,
		      unsigned long end)
{
	struct interval_tree_node *itree;

	itree = interval_tree_iter_first(&notifier->root, start, end - 1);

	if (itree)
		return container_of(itree, struct drm_gpusvm_range, itree);
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_find);

/**
 * drm_gpusvm_for_each_range_safe() - Safely iterate over GPU SVM ranges in a notifier
 * @range__: Iterator variable for the ranges
 * @next__: Iterator variable for the ranges temporary storage
 * @notifier__: Pointer to the GPU SVM notifier
 * @start__: Start address of the range
 * @end__: End address of the range
 *
 * This macro is used to iterate over GPU SVM ranges in a notifier while
 * removing ranges from it.
 */
#define drm_gpusvm_for_each_range_safe(range__, next__, notifier__, start__, end__)	\
	for ((range__) = drm_gpusvm_range_find((notifier__), (start__), (end__)),	\
	     (next__) = __drm_gpusvm_range_next(range__);				\
	     (range__) && (drm_gpusvm_range_start(range__) < (end__));			\
	     (range__) = (next__), (next__) = __drm_gpusvm_range_next(range__))

/**
 * __drm_gpusvm_notifier_next() - get the next drm_gpusvm_notifier in the list
 * @notifier: a pointer to the current drm_gpusvm_notifier
 *
 * Return: A pointer to the next drm_gpusvm_notifier if available, or NULL if
 *         the current notifier is the last one or if the input notifier is
 *         NULL.
 */
static struct drm_gpusvm_notifier *
__drm_gpusvm_notifier_next(struct drm_gpusvm_notifier *notifier)
{
	if (notifier && !list_is_last(&notifier->entry,
				      &notifier->gpusvm->notifier_list))
		return list_next_entry(notifier, entry);

	return NULL;
}

static struct drm_gpusvm_notifier *
notifier_iter_first(struct rb_root_cached *root, unsigned long start,
		    unsigned long last)
{
	struct interval_tree_node *itree;

	itree = interval_tree_iter_first(root, start, last);

	if (itree)
		return container_of(itree, struct drm_gpusvm_notifier, itree);
	else
		return NULL;
}

/**
 * drm_gpusvm_for_each_notifier() - Iterate over GPU SVM notifiers in a gpusvm
 * @notifier__: Iterator variable for the notifiers
 * @gpusvm__: Pointer to the GPU SVM structure
 * @start__: Start address of the notifier
 * @end__: End address of the notifier
 *
 * This macro is used to iterate over GPU SVM notifiers in a gpusvm.
 */
#define drm_gpusvm_for_each_notifier(notifier__, gpusvm__, start__, end__)		\
	for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1);	\
	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__));		\
	     (notifier__) = __drm_gpusvm_notifier_next(notifier__))

/**
 * drm_gpusvm_for_each_notifier_safe() - Safely iterate over GPU SVM notifiers in a gpusvm
 * @notifier__: Iterator variable for the notifiers
 * @next__: Iterator variable for the notifiers temporary storage
 * @gpusvm__: Pointer to the GPU SVM structure
 * @start__: Start address of the notifier
 * @end__: End address of the notifier
 *
 * This macro is used to iterate over GPU SVM notifiers in a gpusvm while
 * removing notifiers from it.
 */
#define drm_gpusvm_for_each_notifier_safe(notifier__, next__, gpusvm__, start__, end__)	\
	for ((notifier__) = notifier_iter_first(&(gpusvm__)->root, (start__), (end__) - 1),	\
	     (next__) = __drm_gpusvm_notifier_next(notifier__);				\
	     (notifier__) && (drm_gpusvm_notifier_start(notifier__) < (end__));		\
	     (notifier__) = (next__), (next__) = __drm_gpusvm_notifier_next(notifier__))

/**
 * drm_gpusvm_notifier_invalidate() - Invalidate a GPU SVM notifier.
 * @mni: Pointer to the mmu_interval_notifier structure.
 * @mmu_range: Pointer to the mmu_notifier_range structure.
 * @cur_seq: Current sequence number.
 *
 * This function serves as a generic MMU notifier for GPU SVM. It sets the MMU
 * notifier sequence number and calls the driver invalidate vfunc under
 * gpusvm->notifier_lock.
 *
 * Return: true if the operation succeeds, false otherwise.
 */
static bool
drm_gpusvm_notifier_invalidate(struct mmu_interval_notifier *mni,
			       const struct mmu_notifier_range *mmu_range,
			       unsigned long cur_seq)
{
	struct drm_gpusvm_notifier *notifier =
		container_of(mni, typeof(*notifier), notifier);
	struct drm_gpusvm *gpusvm = notifier->gpusvm;

	if (!mmu_notifier_range_blockable(mmu_range))
		return false;

	down_write(&gpusvm->notifier_lock);
	mmu_interval_set_seq(mni, cur_seq);
	gpusvm->ops->invalidate(gpusvm, notifier, mmu_range);
	up_write(&gpusvm->notifier_lock);

	return true;
}

/*
 * drm_gpusvm_notifier_ops - MMU interval notifier operations for GPU SVM
 */
static const struct mmu_interval_notifier_ops drm_gpusvm_notifier_ops = {
	.invalidate = drm_gpusvm_notifier_invalidate,
};

/**
 * drm_gpusvm_init() - Initialize the GPU SVM.
 * @gpusvm: Pointer to the GPU SVM structure.
 * @name: Name of the GPU SVM.
 * @drm: Pointer to the DRM device structure.
 * @mm: Pointer to the mm_struct for the address space.
 * @device_private_page_owner: Device private pages owner.
 * @mm_start: Start address of GPU SVM.
 * @mm_range: Range of the GPU SVM.
 * @notifier_size: Size of individual notifiers.
 * @ops: Pointer to the operations structure for GPU SVM.
 * @chunk_sizes: Pointer to the array of chunk sizes used in range allocation.
 *               Entries should be powers of 2 in descending order with last
 *               entry being SZ_4K.
 * @num_chunks: Number of chunks.
 *
 * This function initializes the GPU SVM.
 *
 * Return: 0 on success, a negative error code on failure.
 */
int drm_gpusvm_init(struct drm_gpusvm *gpusvm,
		    const char *name, struct drm_device *drm,
		    struct mm_struct *mm, void *device_private_page_owner,
		    unsigned long mm_start, unsigned long mm_range,
		    unsigned long notifier_size,
		    const struct drm_gpusvm_ops *ops,
		    const unsigned long *chunk_sizes, int num_chunks)
{
	if (!ops->invalidate || !num_chunks)
		return -EINVAL;

	gpusvm->name = name;
	gpusvm->drm = drm;
	gpusvm->mm = mm;
	gpusvm->device_private_page_owner = device_private_page_owner;
	gpusvm->mm_start = mm_start;
	gpusvm->mm_range = mm_range;
	gpusvm->notifier_size = notifier_size;
	gpusvm->ops = ops;
	gpusvm->chunk_sizes = chunk_sizes;
	gpusvm->num_chunks = num_chunks;

	mmgrab(mm);
	gpusvm->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&gpusvm->notifier_list);

	init_rwsem(&gpusvm->notifier_lock);

	fs_reclaim_acquire(GFP_KERNEL);
	might_lock(&gpusvm->notifier_lock);
	fs_reclaim_release(GFP_KERNEL);

#ifdef CONFIG_LOCKDEP
	gpusvm->lock_dep_map = NULL;
#endif

	return 0;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_init);
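
/*
 * A minimal initialization sketch (illustrative only; struct driver_vm,
 * driver_gpusvm_ops, and the chunk array are hypothetical, and the 47-bit
 * address range is an arbitrary assumption). The chunk sizes are powers of 2
 * in descending order ending at SZ_4K, per the drm_gpusvm_init() requirements:
 *
 *	static const unsigned long driver_chunk_sizes[] = {
 *		SZ_2M, SZ_64K, SZ_4K,
 *	};
 *
 *	int driver_vm_svm_init(struct driver_vm *vm, struct drm_device *drm)
 *	{
 *		// NULL page owner as this sketch does not use device memory
 *		return drm_gpusvm_init(&vm->svm, "driver-svm", drm,
 *				       current->mm, NULL, 0, 1ull << 47,
 *				       SZ_512M, &driver_gpusvm_ops,
 *				       driver_chunk_sizes,
 *				       ARRAY_SIZE(driver_chunk_sizes));
 *	}
 */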

/**
 * drm_gpusvm_notifier_find() - Find GPU SVM notifier
 * @gpusvm: Pointer to the GPU SVM structure
 * @fault_addr: Fault address
 *
 * This function finds the GPU SVM notifier associated with the fault address.
 *
 * Return: Pointer to the GPU SVM notifier on success, NULL otherwise.
 */
static struct drm_gpusvm_notifier *
drm_gpusvm_notifier_find(struct drm_gpusvm *gpusvm,
			 unsigned long fault_addr)
{
	return notifier_iter_first(&gpusvm->root, fault_addr, fault_addr + 1);
}

/**
 * to_drm_gpusvm_notifier() - retrieve the container struct for a given rbtree node
 * @node: a pointer to the rbtree node embedded within a drm_gpusvm_notifier struct
 *
 * Return: A pointer to the containing drm_gpusvm_notifier structure.
 */
static struct drm_gpusvm_notifier *to_drm_gpusvm_notifier(struct rb_node *node)
{
	return container_of(node, struct drm_gpusvm_notifier, itree.rb);
}

/**
 * drm_gpusvm_notifier_insert() - Insert GPU SVM notifier
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier structure
 *
 * This function inserts the GPU SVM notifier into the GPU SVM RB tree and list.
 */
static void drm_gpusvm_notifier_insert(struct drm_gpusvm *gpusvm,
				       struct drm_gpusvm_notifier *notifier)
{
	struct rb_node *node;
	struct list_head *head;

	interval_tree_insert(&notifier->itree, &gpusvm->root);

	node = rb_prev(&notifier->itree.rb);
	if (node)
		head = &(to_drm_gpusvm_notifier(node))->entry;
	else
		head = &gpusvm->notifier_list;

	list_add(&notifier->entry, head);
}

/**
 * drm_gpusvm_notifier_remove() - Remove GPU SVM notifier
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier structure
 *
 * This function removes the GPU SVM notifier from the GPU SVM RB tree and list.
 */
static void drm_gpusvm_notifier_remove(struct drm_gpusvm *gpusvm,
				       struct drm_gpusvm_notifier *notifier)
{
	interval_tree_remove(&notifier->itree, &gpusvm->root);
	list_del(&notifier->entry);
}

/**
 * drm_gpusvm_fini() - Finalize the GPU SVM.
 * @gpusvm: Pointer to the GPU SVM structure.
 *
 * This function finalizes the GPU SVM by cleaning up any remaining ranges and
 * notifiers, and dropping a reference to the struct mm_struct.
 */
void drm_gpusvm_fini(struct drm_gpusvm *gpusvm)
{
	struct drm_gpusvm_notifier *notifier, *next;

	drm_gpusvm_for_each_notifier_safe(notifier, next, gpusvm, 0, LONG_MAX) {
		struct drm_gpusvm_range *range, *__next;

		/*
		 * Remove notifier first to avoid racing with any invalidation
		 */
		mmu_interval_notifier_remove(&notifier->notifier);
		notifier->flags.removed = true;

		drm_gpusvm_for_each_range_safe(range, __next, notifier, 0,
					       LONG_MAX)
			drm_gpusvm_range_remove(gpusvm, range);
	}

	mmdrop(gpusvm->mm);
	WARN_ON(!RB_EMPTY_ROOT(&gpusvm->root.rb_root));
}
EXPORT_SYMBOL_GPL(drm_gpusvm_fini);

/**
 * drm_gpusvm_notifier_alloc() - Allocate GPU SVM notifier
 * @gpusvm: Pointer to the GPU SVM structure
 * @fault_addr: Fault address
 *
 * This function allocates and initializes the GPU SVM notifier structure.
 *
 * Return: Pointer to the allocated GPU SVM notifier on success, ERR_PTR() on failure.
 */
static struct drm_gpusvm_notifier *
drm_gpusvm_notifier_alloc(struct drm_gpusvm *gpusvm, unsigned long fault_addr)
{
	struct drm_gpusvm_notifier *notifier;

	if (gpusvm->ops->notifier_alloc)
		notifier = gpusvm->ops->notifier_alloc();
	else
		notifier = kzalloc(sizeof(*notifier), GFP_KERNEL);

	if (!notifier)
		return ERR_PTR(-ENOMEM);

	notifier->gpusvm = gpusvm;
	notifier->itree.start = ALIGN_DOWN(fault_addr, gpusvm->notifier_size);
	notifier->itree.last = ALIGN(fault_addr + 1, gpusvm->notifier_size) - 1;
	INIT_LIST_HEAD(&notifier->entry);
	notifier->root = RB_ROOT_CACHED;
	INIT_LIST_HEAD(&notifier->range_list);

	return notifier;
}

/**
 * drm_gpusvm_notifier_free() - Free GPU SVM notifier
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier structure
 *
 * This function frees the GPU SVM notifier structure.
 */
static void drm_gpusvm_notifier_free(struct drm_gpusvm *gpusvm,
				     struct drm_gpusvm_notifier *notifier)
{
	WARN_ON(!RB_EMPTY_ROOT(&notifier->root.rb_root));

	if (gpusvm->ops->notifier_free)
		gpusvm->ops->notifier_free(notifier);
	else
		kfree(notifier);
}

/**
 * to_drm_gpusvm_range() - retrieve the container struct for a given rbtree node
 * @node: a pointer to the rbtree node embedded within a drm_gpusvm_range struct
 *
 * Return: A pointer to the containing drm_gpusvm_range structure.
 */
static struct drm_gpusvm_range *to_drm_gpusvm_range(struct rb_node *node)
{
	return container_of(node, struct drm_gpusvm_range, itree.rb);
}

/**
 * drm_gpusvm_range_insert() - Insert GPU SVM range
 * @notifier: Pointer to the GPU SVM notifier structure
 * @range: Pointer to the GPU SVM range structure
 *
 * This function inserts the GPU SVM range into the notifier RB tree and list.
 */
static void drm_gpusvm_range_insert(struct drm_gpusvm_notifier *notifier,
				    struct drm_gpusvm_range *range)
{
	struct rb_node *node;
	struct list_head *head;

	drm_gpusvm_notifier_lock(notifier->gpusvm);
	interval_tree_insert(&range->itree, &notifier->root);

	node = rb_prev(&range->itree.rb);
	if (node)
		head = &(to_drm_gpusvm_range(node))->entry;
	else
		head = &notifier->range_list;

	list_add(&range->entry, head);
	drm_gpusvm_notifier_unlock(notifier->gpusvm);
}

/**
 * __drm_gpusvm_range_remove() - Remove GPU SVM range
 * @notifier: Pointer to the GPU SVM notifier structure
 * @range: Pointer to the GPU SVM range structure
 *
 * This function removes the GPU SVM range from the notifier RB tree and list.
 */
static void __drm_gpusvm_range_remove(struct drm_gpusvm_notifier *notifier,
				      struct drm_gpusvm_range *range)
{
	interval_tree_remove(&range->itree, &notifier->root);
	list_del(&range->entry);
}

/**
 * drm_gpusvm_range_alloc() - Allocate GPU SVM range
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier structure
 * @fault_addr: Fault address
 * @chunk_size: Chunk size
 * @migrate_devmem: Flag indicating whether to migrate device memory
 *
 * This function allocates and initializes the GPU SVM range structure.
 *
 * Return: Pointer to the allocated GPU SVM range on success, ERR_PTR() on failure.
 */
static struct drm_gpusvm_range *
drm_gpusvm_range_alloc(struct drm_gpusvm *gpusvm,
		       struct drm_gpusvm_notifier *notifier,
		       unsigned long fault_addr, unsigned long chunk_size,
		       bool migrate_devmem)
{
	struct drm_gpusvm_range *range;

	if (gpusvm->ops->range_alloc)
		range = gpusvm->ops->range_alloc(gpusvm);
	else
		range = kzalloc(sizeof(*range), GFP_KERNEL);

	if (!range)
		return ERR_PTR(-ENOMEM);

	kref_init(&range->refcount);
	range->gpusvm = gpusvm;
	range->notifier = notifier;
	range->itree.start = ALIGN_DOWN(fault_addr, chunk_size);
	range->itree.last = ALIGN(fault_addr + 1, chunk_size) - 1;
	INIT_LIST_HEAD(&range->entry);
	range->notifier_seq = LONG_MAX;
	range->flags.migrate_devmem = migrate_devmem ? 1 : 0;

	return range;
}

/**
 * drm_gpusvm_check_pages() - Check pages
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier structure
 * @start: Start address
 * @end: End address
 *
 * Check if pages between start and end have been faulted in on the CPU. Used
 * to prevent migration of pages without CPU backing store.
 *
 * Return: True if pages have been faulted into CPU, False otherwise
 */
static bool drm_gpusvm_check_pages(struct drm_gpusvm *gpusvm,
				   struct drm_gpusvm_notifier *notifier,
				   unsigned long start, unsigned long end)
{
	struct hmm_range hmm_range = {
		.default_flags = 0,
		.notifier = &notifier->notifier,
		.start = start,
		.end = end,
		.dev_private_owner = gpusvm->device_private_page_owner,
	};
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	unsigned long *pfns;
	unsigned long npages = npages_in_range(start, end);
	int err, i;

	mmap_assert_locked(gpusvm->mm);

	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return false;

	hmm_range.notifier_seq = mmu_interval_read_begin(&notifier->notifier);
	hmm_range.hmm_pfns = pfns;

	while (true) {
		err = hmm_range_fault(&hmm_range);
		if (err == -EBUSY) {
			if (time_after(jiffies, timeout))
				break;

			hmm_range.notifier_seq =
				mmu_interval_read_begin(&notifier->notifier);
			continue;
		}
		break;
	}
	if (err)
		goto err_free;

	for (i = 0; i < npages;) {
		if (!(pfns[i] & HMM_PFN_VALID)) {
			err = -EFAULT;
			goto err_free;
		}
		i += 0x1 << hmm_pfn_to_map_order(pfns[i]);
	}

err_free:
	kvfree(pfns);
	return err ? false : true;
}

/**
 * drm_gpusvm_range_chunk_size() - Determine chunk size for GPU SVM range
 * @gpusvm: Pointer to the GPU SVM structure
 * @notifier: Pointer to the GPU SVM notifier structure
 * @vas: Pointer to the virtual memory area structure
 * @fault_addr: Fault address
 * @gpuva_start: Start address of GPUVA which mirrors CPU
 * @gpuva_end: End address of GPUVA which mirrors CPU
 * @check_pages_threshold: Check CPU pages for present threshold
 *
 * This function determines the chunk size for the GPU SVM range based on the
 * fault address, GPU SVM chunk sizes, existing GPU SVM ranges, and the virtual
 * memory area boundaries.
 *
 * Return: Chunk size on success, LONG_MAX on failure.
 */
static unsigned long
drm_gpusvm_range_chunk_size(struct drm_gpusvm *gpusvm,
			    struct drm_gpusvm_notifier *notifier,
			    struct vm_area_struct *vas,
			    unsigned long fault_addr,
			    unsigned long gpuva_start,
			    unsigned long gpuva_end,
			    unsigned long check_pages_threshold)
{
	unsigned long start, end;
	int i = 0;

retry:
	for (; i < gpusvm->num_chunks; ++i) {
		start = ALIGN_DOWN(fault_addr, gpusvm->chunk_sizes[i]);
		end = ALIGN(fault_addr + 1, gpusvm->chunk_sizes[i]);

		if (start >= vas->vm_start && end <= vas->vm_end &&
		    start >= drm_gpusvm_notifier_start(notifier) &&
		    end <= drm_gpusvm_notifier_end(notifier) &&
		    start >= gpuva_start && end <= gpuva_end)
			break;
	}

	if (i == gpusvm->num_chunks)
		return LONG_MAX;

	/*
	 * If allocating more than a page, ensure not to overlap with existing
	 * ranges.
	 */
	if (end - start != SZ_4K) {
		struct drm_gpusvm_range *range;

		range = drm_gpusvm_range_find(notifier, start, end);
		if (range) {
			++i;
			goto retry;
		}

		/*
		 * XXX: Only create range on pages CPU has faulted in. Without
		 * this check, or prefault, on BMG 'xe_exec_system_allocator --r
		 * process-many-malloc' fails. In the failure case, each process
		 * mallocs 16k but the CPU VMA is ~128k which results in 64k SVM
		 * ranges. When migrating the SVM ranges, some processes fail in
		 * drm_gpusvm_migrate_to_devmem with 'migrate.cpages != npages'
		 * and then upon drm_gpusvm_range_get_pages device pages from
		 * other processes are collected + faulted in which creates all
		 * sorts of problems. Unsure exactly how this is happening; the
		 * problem also goes away if 'xe_exec_system_allocator --r
		 * process-many-malloc' mallocs at least 64k at a time.
		 */
		if (end - start <= check_pages_threshold &&
		    !drm_gpusvm_check_pages(gpusvm, notifier, start, end)) {
			++i;
			goto retry;
		}
	}

	return end - start;
}

#ifdef CONFIG_LOCKDEP
/**
 * drm_gpusvm_driver_lock_held() - Assert GPU SVM driver lock is held
 * @gpusvm: Pointer to the GPU SVM structure.
 *
 * Ensure driver lock is held.
 */
static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
{
	if ((gpusvm)->lock_dep_map)
		lockdep_assert(lock_is_held_type((gpusvm)->lock_dep_map, 0));
}
#else
static void drm_gpusvm_driver_lock_held(struct drm_gpusvm *gpusvm)
{
}
#endif

/**
 * drm_gpusvm_range_find_or_insert() - Find or insert GPU SVM range
 * @gpusvm: Pointer to the GPU SVM structure
 * @fault_addr: Fault address
 * @gpuva_start: Start address of GPUVA which mirrors CPU
 * @gpuva_end: End address of GPUVA which mirrors CPU
 * @ctx: GPU SVM context
 *
 * This function finds or inserts a newly allocated GPU SVM range based on the
 * fault address. Caller must hold a lock to protect range lookup and insertion.
 *
 * Return: Pointer to the GPU SVM range on success, ERR_PTR() on failure.
 */
struct drm_gpusvm_range *
drm_gpusvm_range_find_or_insert(struct drm_gpusvm *gpusvm,
				unsigned long fault_addr,
				unsigned long gpuva_start,
				unsigned long gpuva_end,
				const struct drm_gpusvm_ctx *ctx)
{
	struct drm_gpusvm_notifier *notifier;
	struct drm_gpusvm_range *range;
	struct mm_struct *mm = gpusvm->mm;
	struct vm_area_struct *vas;
	bool notifier_alloc = false;
	unsigned long chunk_size;
	int err;
	bool migrate_devmem;

	drm_gpusvm_driver_lock_held(gpusvm);

	if (fault_addr < gpusvm->mm_start ||
	    fault_addr > gpusvm->mm_start + gpusvm->mm_range)
		return ERR_PTR(-EINVAL);

	if (!mmget_not_zero(mm))
		return ERR_PTR(-EFAULT);

	notifier = drm_gpusvm_notifier_find(gpusvm, fault_addr);
	if (!notifier) {
		notifier = drm_gpusvm_notifier_alloc(gpusvm, fault_addr);
		if (IS_ERR(notifier)) {
			err = PTR_ERR(notifier);
			goto err_mmunlock;
		}
		notifier_alloc = true;
		err = mmu_interval_notifier_insert(&notifier->notifier,
						   mm,
						   drm_gpusvm_notifier_start(notifier),
						   drm_gpusvm_notifier_size(notifier),
						   &drm_gpusvm_notifier_ops);
		if (err)
			goto err_notifier;
	}

	mmap_read_lock(mm);

	vas = vma_lookup(mm, fault_addr);
	if (!vas) {
		err = -ENOENT;
		goto err_notifier_remove;
	}

	if (!ctx->read_only && !(vas->vm_flags & VM_WRITE)) {
		err = -EPERM;
		goto err_notifier_remove;
	}

	range = drm_gpusvm_range_find(notifier, fault_addr, fault_addr + 1);
	if (range)
		goto out_mmunlock;
	/*
	 * XXX: Short-circuiting migration based on migrate_vma_* current
	 * limitations. If/when migrate_vma_* add more support, this logic will
	 * have to change.
	 */
	migrate_devmem = ctx->devmem_possible &&
		vma_is_anonymous(vas) && !is_vm_hugetlb_page(vas);

	chunk_size = drm_gpusvm_range_chunk_size(gpusvm, notifier, vas,
						 fault_addr, gpuva_start,
						 gpuva_end,
						 ctx->check_pages_threshold);
	if (chunk_size == LONG_MAX) {
		err = -EINVAL;
		goto err_notifier_remove;
	}

	range = drm_gpusvm_range_alloc(gpusvm, notifier, fault_addr, chunk_size,
				       migrate_devmem);
	if (IS_ERR(range)) {
		err = PTR_ERR(range);
		goto err_notifier_remove;
	}

	drm_gpusvm_range_insert(notifier, range);
	if (notifier_alloc)
		drm_gpusvm_notifier_insert(gpusvm, notifier);

out_mmunlock:
	mmap_read_unlock(mm);
	mmput(mm);

	return range;

err_notifier_remove:
	mmap_read_unlock(mm);
	if (notifier_alloc)
		mmu_interval_notifier_remove(&notifier->notifier);
err_notifier:
	if (notifier_alloc)
		drm_gpusvm_notifier_free(gpusvm, notifier);
err_mmunlock:
	mmput(mm);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_find_or_insert);

/**
 * __drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range (internal)
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range structure
 * @npages: Number of pages to unmap
 *
 * This function unmaps pages associated with a GPU SVM range. Assumes and
 * asserts correct locking is in place when called.
 */
static void __drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
					   struct drm_gpusvm_range *range,
					   unsigned long npages)
{
	unsigned long i, j;
	struct drm_pagemap *dpagemap = range->dpagemap;
	struct device *dev = gpusvm->drm->dev;

	lockdep_assert_held(&gpusvm->notifier_lock);

	if (range->flags.has_dma_mapping) {
		for (i = 0, j = 0; i < npages; j++) {
			struct drm_pagemap_device_addr *addr = &range->dma_addr[j];

			if (addr->proto == DRM_INTERCONNECT_SYSTEM)
				dma_unmap_page(dev,
					       addr->addr,
					       PAGE_SIZE << addr->order,
					       addr->dir);
			else if (dpagemap && dpagemap->ops->device_unmap)
				dpagemap->ops->device_unmap(dpagemap,
							    dev, *addr);
			i += 1 << addr->order;
		}
		range->flags.has_devmem_pages = false;
		range->flags.has_dma_mapping = false;
		range->dpagemap = NULL;
	}
}

/**
 * drm_gpusvm_range_free_pages() - Free pages associated with a GPU SVM range
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range structure
 *
 * This function frees the dma address array associated with a GPU SVM range.
 */
static void drm_gpusvm_range_free_pages(struct drm_gpusvm *gpusvm,
					struct drm_gpusvm_range *range)
{
	lockdep_assert_held(&gpusvm->notifier_lock);

	if (range->dma_addr) {
		kvfree(range->dma_addr);
		range->dma_addr = NULL;
	}
}

/**
 * drm_gpusvm_range_remove() - Remove GPU SVM range
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range to be removed
 *
 * This function removes the specified GPU SVM range and also removes the parent
 * GPU SVM notifier if no more ranges remain in the notifier. The caller must
 * hold a lock to protect range and notifier removal.
 */
void drm_gpusvm_range_remove(struct drm_gpusvm *gpusvm,
			     struct drm_gpusvm_range *range)
{
	unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
					       drm_gpusvm_range_end(range));
	struct drm_gpusvm_notifier *notifier;

	drm_gpusvm_driver_lock_held(gpusvm);

	notifier = drm_gpusvm_notifier_find(gpusvm,
					    drm_gpusvm_range_start(range));
	if (WARN_ON_ONCE(!notifier))
		return;

	drm_gpusvm_notifier_lock(gpusvm);
	__drm_gpusvm_range_unmap_pages(gpusvm, range, npages);
	drm_gpusvm_range_free_pages(gpusvm, range);
	__drm_gpusvm_range_remove(notifier, range);
	drm_gpusvm_notifier_unlock(gpusvm);

	drm_gpusvm_range_put(range);

	if (RB_EMPTY_ROOT(&notifier->root.rb_root)) {
		if (!notifier->flags.removed)
			mmu_interval_notifier_remove(&notifier->notifier);
		drm_gpusvm_notifier_remove(gpusvm, notifier);
		drm_gpusvm_notifier_free(gpusvm, notifier);
	}
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_remove);

/**
 * drm_gpusvm_range_get() - Get a reference to GPU SVM range
 * @range: Pointer to the GPU SVM range
 *
 * This function increments the reference count of the specified GPU SVM range.
 *
 * Return: Pointer to the GPU SVM range.
 */
struct drm_gpusvm_range *
drm_gpusvm_range_get(struct drm_gpusvm_range *range)
{
	kref_get(&range->refcount);

	return range;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_get);

/**
 * drm_gpusvm_range_destroy() - Destroy GPU SVM range
 * @refcount: Pointer to the reference counter embedded in the GPU SVM range
 *
 * This function destroys the specified GPU SVM range when its reference count
 * reaches zero. If a custom range-free function is provided, it is invoked to
 * free the range; otherwise, the range is deallocated using kfree().
 */
static void drm_gpusvm_range_destroy(struct kref *refcount)
{
	struct drm_gpusvm_range *range =
		container_of(refcount, struct drm_gpusvm_range, refcount);
	struct drm_gpusvm *gpusvm = range->gpusvm;

	if (gpusvm->ops->range_free)
		gpusvm->ops->range_free(range);
	else
		kfree(range);
}

/**
 * drm_gpusvm_range_put() - Put a reference to GPU SVM range
 * @range: Pointer to the GPU SVM range
 *
 * This function decrements the reference count of the specified GPU SVM range
 * and frees it when the count reaches zero.
 */
void drm_gpusvm_range_put(struct drm_gpusvm_range *range)
{
	kref_put(&range->refcount, drm_gpusvm_range_destroy);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_put);

/**
 * drm_gpusvm_range_pages_valid() - GPU SVM range pages valid
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range structure
 *
 * This function determines if a GPU SVM range's pages are valid. Expected to be
 * called holding gpusvm->notifier_lock and as the last step before committing a
 * GPU binding. This is akin to a notifier seqno check in the HMM documentation
 * but due to wider notifiers (i.e., notifiers which span multiple ranges) this
 * function is required for finer grained checking (i.e., per range) if pages
 * are valid.
 *
 * Return: True if GPU SVM range has valid pages, False otherwise
 */
bool drm_gpusvm_range_pages_valid(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range)
{
	lockdep_assert_held(&gpusvm->notifier_lock);

	return range->flags.has_devmem_pages || range->flags.has_dma_mapping;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_pages_valid);

/**
 * drm_gpusvm_range_pages_valid_unlocked() - GPU SVM range pages valid unlocked
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range structure
 *
 * This function determines if a GPU SVM range's pages are valid. Expected to be
 * called without holding gpusvm->notifier_lock.
 *
 * Return: True if GPU SVM range has valid pages, False otherwise
 */
static bool
drm_gpusvm_range_pages_valid_unlocked(struct drm_gpusvm *gpusvm,
				      struct drm_gpusvm_range *range)
{
	bool pages_valid;

	if (!range->dma_addr)
		return false;

	drm_gpusvm_notifier_lock(gpusvm);
	pages_valid = drm_gpusvm_range_pages_valid(gpusvm, range);
	if (!pages_valid)
		drm_gpusvm_range_free_pages(gpusvm, range);
	drm_gpusvm_notifier_unlock(gpusvm);

	return pages_valid;
}

/**
 * drm_gpusvm_range_get_pages() - Get pages for a GPU SVM range
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range structure
 * @ctx: GPU SVM context
 *
 * This function gets pages for a GPU SVM range and ensures they are mapped for
 * DMA access.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_gpusvm_range_get_pages(struct drm_gpusvm *gpusvm,
			       struct drm_gpusvm_range *range,
			       const struct drm_gpusvm_ctx *ctx)
{
	struct mmu_interval_notifier *notifier = &range->notifier->notifier;
	struct hmm_range hmm_range = {
		.default_flags = HMM_PFN_REQ_FAULT | (ctx->read_only ? 0 :
			HMM_PFN_REQ_WRITE),
		.notifier = notifier,
		.start = drm_gpusvm_range_start(range),
		.end = drm_gpusvm_range_end(range),
		.dev_private_owner = gpusvm->device_private_page_owner,
	};
	struct mm_struct *mm = gpusvm->mm;
	struct drm_gpusvm_zdd *zdd;
	unsigned long timeout =
		jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
	unsigned long i, j;
	unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
					       drm_gpusvm_range_end(range));
	unsigned long num_dma_mapped;
	unsigned int order = 0;
	unsigned long *pfns;
	int err = 0;
	struct dev_pagemap *pagemap = NULL;
	struct drm_pagemap *dpagemap = NULL;

retry:
	hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
	if (drm_gpusvm_range_pages_valid_unlocked(gpusvm, range))
		goto set_seqno;

	pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
	if (!pfns)
		return -ENOMEM;

	if (!mmget_not_zero(mm)) {
		err = -EFAULT;
		goto err_free;
	}

	hmm_range.hmm_pfns = pfns;
	while (true) {
		mmap_read_lock(mm);
		err = hmm_range_fault(&hmm_range);
		mmap_read_unlock(mm);

		if (err == -EBUSY) {
			if (time_after(jiffies, timeout))
				break;

			hmm_range.notifier_seq =
				mmu_interval_read_begin(notifier);
			continue;
		}
		break;
	}
	mmput(mm);
	if (err)
		goto err_free;

map_pages:
	/*
	 * Perform all dma mappings under the notifier lock to not
	 * access freed pages. A notifier will either block on
	 * the notifier lock or unmap dma.
	 */
	drm_gpusvm_notifier_lock(gpusvm);

	if (range->flags.unmapped) {
		drm_gpusvm_notifier_unlock(gpusvm);
		err = -EFAULT;
		goto err_free;
	}

	if (mmu_interval_read_retry(notifier, hmm_range.notifier_seq)) {
		drm_gpusvm_notifier_unlock(gpusvm);
		kvfree(pfns);
		goto retry;
	}

	if (!range->dma_addr) {
		/* Unlock and restart mapping to allocate memory. */
		drm_gpusvm_notifier_unlock(gpusvm);
		range->dma_addr = kvmalloc_array(npages,
						 sizeof(*range->dma_addr),
						 GFP_KERNEL);
		if (!range->dma_addr) {
			err = -ENOMEM;
			goto err_free;
		}
		goto map_pages;
	}

	zdd = NULL;
	num_dma_mapped = 0;
	for (i = 0, j = 0; i < npages; ++j) {
		struct page *page = hmm_pfn_to_page(pfns[i]);

		order = hmm_pfn_to_map_order(pfns[i]);
		if (is_device_private_page(page) ||
		    is_device_coherent_page(page)) {
			if (zdd != page->zone_device_data && i > 0) {
				err = -EOPNOTSUPP;
				goto err_unmap;
			}
			zdd = page->zone_device_data;
			if (pagemap != page_pgmap(page)) {
				if (i > 0) {
					err = -EOPNOTSUPP;
					goto err_unmap;
				}

				pagemap = page_pgmap(page);
				dpagemap = zdd->devmem_allocation->dpagemap;
				if (drm_WARN_ON(gpusvm->drm, !dpagemap)) {
					/*
					 * Raced. This is not supposed to happen
					 * since hmm_range_fault() should've migrated
					 * this page to system.
					 */
					err = -EAGAIN;
					goto err_unmap;
				}
			}
			range->dma_addr[j] =
				dpagemap->ops->device_map(dpagemap,
							  gpusvm->drm->dev,
							  page, order,
							  DMA_BIDIRECTIONAL);
			if (dma_mapping_error(gpusvm->drm->dev,
					      range->dma_addr[j].addr)) {
				err = -EFAULT;
				goto err_unmap;
			}
		} else {
			dma_addr_t addr;

			if (is_zone_device_page(page) || zdd) {
				err = -EOPNOTSUPP;
				goto err_unmap;
			}

			addr = dma_map_page(gpusvm->drm->dev,
					    page, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(gpusvm->drm->dev, addr)) {
				err = -EFAULT;
				goto err_unmap;
			}

			range->dma_addr[j] = drm_pagemap_device_addr_encode
				(addr, DRM_INTERCONNECT_SYSTEM, order,
				 DMA_BIDIRECTIONAL);
		}
		i += 1 << order;
		num_dma_mapped = i;
		range->flags.has_dma_mapping = true;
	}

	if (zdd) {
		range->flags.has_devmem_pages = true;
		range->dpagemap = dpagemap;
	}

	drm_gpusvm_notifier_unlock(gpusvm);
	kvfree(pfns);
set_seqno:
	range->notifier_seq = hmm_range.notifier_seq;

	return 0;

err_unmap:
	__drm_gpusvm_range_unmap_pages(gpusvm, range, num_dma_mapped);
	drm_gpusvm_notifier_unlock(gpusvm);
err_free:
	kvfree(pfns);
	if (err == -EAGAIN)
		goto retry;
	return err;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_get_pages);

/**
 * drm_gpusvm_range_unmap_pages() - Unmap pages associated with a GPU SVM range
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range structure
 * @ctx: GPU SVM context
 *
 * This function unmaps pages associated with a GPU SVM range. If @in_notifier
 * is set, it is assumed that gpusvm->notifier_lock is held in write mode; if it
 * is clear, it acquires gpusvm->notifier_lock in read mode. Must be called on
 * each GPU SVM range attached to the notifier in gpusvm->ops->invalidate() for
 * the IOMMU security model.
 */
void drm_gpusvm_range_unmap_pages(struct drm_gpusvm *gpusvm,
				  struct drm_gpusvm_range *range,
				  const struct drm_gpusvm_ctx *ctx)
{
	unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
					       drm_gpusvm_range_end(range));

	if (ctx->in_notifier)
		lockdep_assert_held_write(&gpusvm->notifier_lock);
	else
		drm_gpusvm_notifier_lock(gpusvm);

	__drm_gpusvm_range_unmap_pages(gpusvm, range, npages);

	if (!ctx->in_notifier)
		drm_gpusvm_notifier_unlock(gpusvm);
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_unmap_pages);

/**
 * drm_gpusvm_migration_unlock_put_page() - Put a migration page
 * @page: Pointer to the page to put
 *
 * This function unlocks and puts a page.
 */
static void drm_gpusvm_migration_unlock_put_page(struct page *page)
{
	unlock_page(page);
	put_page(page);
}

/**
 * drm_gpusvm_migration_unlock_put_pages() - Put migration pages
 * @npages: Number of pages
 * @migrate_pfn: Array of migrate page frame numbers
 *
 * This function unlocks and puts an array of pages.
 */
static void drm_gpusvm_migration_unlock_put_pages(unsigned long npages,
						  unsigned long *migrate_pfn)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		struct page *page;

		if (!migrate_pfn[i])
			continue;

		page = migrate_pfn_to_page(migrate_pfn[i]);
		drm_gpusvm_migration_unlock_put_page(page);
		migrate_pfn[i] = 0;
	}
}

/**
 * drm_gpusvm_get_devmem_page() - Get a reference to a device memory page
 * @page: Pointer to the page
 * @zdd: Pointer to the GPU SVM zone device data
 *
 * This function associates the given page with the specified GPU SVM zone
 * device data and initializes it for zone device usage.
 */
static void drm_gpusvm_get_devmem_page(struct page *page,
				       struct drm_gpusvm_zdd *zdd)
{
	page->zone_device_data = drm_gpusvm_zdd_get(zdd);
	zone_device_page_init(page);
}

/**
 * drm_gpusvm_migrate_map_pages() - Map migration pages for GPU SVM migration
 * @dev: The device for which the pages are being mapped
 * @dma_addr: Array to store DMA addresses corresponding to mapped pages
 * @migrate_pfn: Array of migrate page frame numbers to map
 * @npages: Number of pages to map
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function maps pages of memory for migration usage in GPU SVM. It
 * iterates over each page frame number provided in @migrate_pfn, maps the
 * corresponding page, and stores the DMA address in the provided @dma_addr
 * array.
 *
 * Return: 0 on success, -EFAULT if an error occurs during mapping.
 */
static int drm_gpusvm_migrate_map_pages(struct device *dev,
					dma_addr_t *dma_addr,
					unsigned long *migrate_pfn,
					unsigned long npages,
					enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);

		if (!page)
			continue;

		if (WARN_ON_ONCE(is_zone_device_page(page)))
			return -EFAULT;

		dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, dma_addr[i]))
			return -EFAULT;
	}

	return 0;
}

/**
 * drm_gpusvm_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
 * @dev: The device for which the pages were mapped
 * @dma_addr: Array of DMA addresses corresponding to mapped pages
 * @npages: Number of pages to unmap
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function unmaps previously mapped pages of memory for GPU Shared Virtual
 * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
 * if it's valid and not already unmapped, and unmaps the corresponding page.
 */
static void drm_gpusvm_migrate_unmap_pages(struct device *dev,
					   dma_addr_t *dma_addr,
					   unsigned long npages,
					   enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
			continue;

		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
	}
}
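
/*
 * An illustrative skeleton of the driver-side device memory operations
 * consumed by the migration helpers below (drm_gpusvm_migrate_to_devmem() and
 * drm_gpusvm_evict_to_ram()). The driver_ prefixed names are hypothetical;
 * the callback signatures follow struct drm_gpusvm_devmem_ops as used in this
 * file:
 *
 *	static void driver_devmem_release(struct drm_gpusvm_devmem *devmem_allocation)
 *	{
 *		// Drop the driver's allocation reference taken before migration
 *	}
 *
 *	static int driver_populate_devmem_pfn(struct drm_gpusvm_devmem *devmem_allocation,
 *					      unsigned long npages, unsigned long *pfn)
 *	{
 *		// Fill pfn[0..npages - 1] with the PFNs of the backing device pages
 *		return 0;
 *	}
 *
 *	static int driver_copy_to_devmem(struct page **pages, dma_addr_t *dma_addr,
 *					 unsigned long npages)
 *	{
 *		// Copy from the DMA-mapped system pages to device memory
 *		return 0;
 *	}
 *
 *	static int driver_copy_to_ram(struct page **pages, dma_addr_t *dma_addr,
 *				      unsigned long npages)
 *	{
 *		// Copy from device memory to the DMA-mapped system pages
 *		return 0;
 *	}
 *
 *	static const struct drm_gpusvm_devmem_ops driver_devmem_ops = {
 *		.devmem_release = driver_devmem_release,
 *		.populate_devmem_pfn = driver_populate_devmem_pfn,
 *		.copy_to_devmem = driver_copy_to_devmem,
 *		.copy_to_ram = driver_copy_to_ram,
 *	};
 */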

/**
 * drm_gpusvm_migrate_to_devmem() - Migrate GPU SVM range to device memory
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range structure
 * @devmem_allocation: Pointer to the device memory allocation. The caller
 *                     should hold a reference to the device memory allocation,
 *                     which should be dropped via ops->devmem_release or upon
 *                     the failure of this function.
 * @ctx: GPU SVM context
 *
 * This function migrates the specified GPU SVM range to device memory. It
 * performs the necessary setup and invokes the driver-specific operations for
 * migration to device memory. Upon successful return, @devmem_allocation can
 * safely reference @range until ops->devmem_release is called; the binding of
 * @devmem_allocation to @range only happens upon successful return of this
 * function. Expected to be called while holding the mmap lock in read mode.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_gpusvm_migrate_to_devmem(struct drm_gpusvm *gpusvm,
				 struct drm_gpusvm_range *range,
				 struct drm_gpusvm_devmem *devmem_allocation,
				 const struct drm_gpusvm_ctx *ctx)
{
	const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
	unsigned long start = drm_gpusvm_range_start(range),
		      end = drm_gpusvm_range_end(range);
	struct migrate_vma migrate = {
		.start = start,
		.end = end,
		.pgmap_owner = gpusvm->device_private_page_owner,
		.flags = MIGRATE_VMA_SELECT_SYSTEM,
	};
	struct mm_struct *mm = gpusvm->mm;
	unsigned long i, npages = npages_in_range(start, end);
	struct vm_area_struct *vas;
	struct drm_gpusvm_zdd *zdd = NULL;
	struct page **pages;
	dma_addr_t *dma_addr;
	void *buf;
	int err;

	mmap_assert_locked(gpusvm->mm);

	if (!range->flags.migrate_devmem)
		return -EINVAL;

	if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
	    !ops->copy_to_ram)
		return -EOPNOTSUPP;

	vas = vma_lookup(mm, start);
	if (!vas) {
		err = -ENOENT;
		goto err_out;
	}

	if (end > vas->vm_end || start < vas->vm_start) {
		err = -EINVAL;
		goto err_out;
	}

	if (!vma_is_anonymous(vas)) {
		err = -EBUSY;
		goto err_out;
	}

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;

	zdd = drm_gpusvm_zdd_alloc(gpusvm->device_private_page_owner);
	if (!zdd) {
		err = -ENOMEM;
		goto err_free;
	}

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	if (!migrate.cpages) {
		err = -EFAULT;
		goto err_free;
	}

	if (migrate.cpages != npages) {
		err = -EBUSY;
		goto err_finalize;
	}

	err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
	if (err)
		goto err_finalize;

	err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
					   migrate.src, npages, DMA_TO_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i) {
		struct page *page = pfn_to_page(migrate.dst[i]);

		pages[i] = page;
		migrate.dst[i] = migrate_pfn(migrate.dst[i]);
		drm_gpusvm_get_devmem_page(page, zdd);
	}

	err = ops->copy_to_devmem(pages, dma_addr, npages);
	if (err)
		goto err_finalize;

	/* Upon success bind devmem allocation to range and zdd */
	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */

err_finalize:
	if (err)
		drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
	migrate_vma_pages(&migrate);
	migrate_vma_finalize(&migrate);
	drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
				       DMA_TO_DEVICE);
err_free:
	if (zdd)
		drm_gpusvm_zdd_put(zdd);
	kvfree(buf);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_migrate_to_devmem);

/**
 * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
 * @vas: Pointer to the VM area structure, can be NULL
 * @fault_page: Fault page
 * @npages: Number of pages to populate
 * @mpages: Number of pages to migrate
/**
 * drm_gpusvm_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
 * @vas: Pointer to the VM area structure, can be NULL
 * @fault_page: Fault page
 * @npages: Number of pages to populate
 * @mpages: Number of pages populated for migration (output)
 * @src_mpfn: Source array of migrate PFNs
 * @mpfn: Array of migrate PFNs to populate
 * @addr: Start address for PFN allocation
 *
 * This function populates the RAM migrate page frame numbers (PFNs) for the
 * specified VM area structure. It allocates and locks pages in the VM area for
 * RAM usage. If @vas is non-NULL, pages are allocated with alloc_page_vma();
 * otherwise alloc_page() is used.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int drm_gpusvm_migrate_populate_ram_pfn(struct vm_area_struct *vas,
                                               struct page *fault_page,
                                               unsigned long npages,
                                               unsigned long *mpages,
                                               unsigned long *src_mpfn,
                                               unsigned long *mpfn,
                                               unsigned long addr)
{
        unsigned long i;

        for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
                struct page *page, *src_page;

                if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
                        continue;

                src_page = migrate_pfn_to_page(src_mpfn[i]);
                if (!src_page)
                        continue;

                if (fault_page) {
                        if (src_page->zone_device_data !=
                            fault_page->zone_device_data)
                                continue;
                }

                if (vas)
                        page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
                else
                        page = alloc_page(GFP_HIGHUSER);

                if (!page)
                        goto free_pages;

                mpfn[i] = migrate_pfn(page_to_pfn(page));
        }

        for (i = 0; i < npages; ++i) {
                struct page *page = migrate_pfn_to_page(mpfn[i]);

                if (!page)
                        continue;

                WARN_ON_ONCE(!trylock_page(page));
                ++*mpages;
        }

        return 0;

free_pages:
        for (i = 0; i < npages; ++i) {
                struct page *page = migrate_pfn_to_page(mpfn[i]);

                if (!page)
                        continue;

                put_page(page);
                mpfn[i] = 0;
        }
        return -ENOMEM;
}

/**
 * drm_gpusvm_evict_to_ram() - Evict GPU SVM range to RAM
 * @devmem_allocation: Pointer to the device memory allocation
 *
 * Similar to __drm_gpusvm_migrate_to_ram(), but this function does not require
 * the mmap lock; the migration is done via the migrate_device_* functions.
 *
 * Return: 0 on success, negative error code on failure.
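 *
 * A minimal eviction-path sketch, assuming a hypothetical driver object that
 * embeds a struct drm_gpusvm_devmem as @devmem (the object and the callback
 * it is evicted from are driver code, not part of this API)::
 *
 *      static int my_evict(struct my_allocation *alloc)
 *      {
 *              return drm_gpusvm_evict_to_ram(&alloc->devmem);
 *      }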
 */
int drm_gpusvm_evict_to_ram(struct drm_gpusvm_devmem *devmem_allocation)
{
        const struct drm_gpusvm_devmem_ops *ops = devmem_allocation->ops;
        unsigned long npages, mpages = 0;
        struct page **pages;
        unsigned long *src, *dst;
        dma_addr_t *dma_addr;
        void *buf;
        int i, err = 0;
        unsigned int retry_count = 2;

        npages = devmem_allocation->size >> PAGE_SHIFT;

retry:
        if (!mmget_not_zero(devmem_allocation->mm))
                return -EFAULT;

        /* Single allocation backing the src, dst, dma_addr and pages arrays */
        buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
                       sizeof(*pages), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto err_out;
        }
        src = buf;
        dst = buf + (sizeof(*src) * npages);
        dma_addr = buf + (2 * sizeof(*src) * npages);
        pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;

        err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
        if (err)
                goto err_free;

        err = migrate_device_pfns(src, npages);
        if (err)
                goto err_free;

        err = drm_gpusvm_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
                                                  src, dst, 0);
        if (err || !mpages)
                goto err_finalize;

        err = drm_gpusvm_migrate_map_pages(devmem_allocation->dev, dma_addr,
                                           dst, npages, DMA_FROM_DEVICE);
        if (err)
                goto err_finalize;

        for (i = 0; i < npages; ++i)
                pages[i] = migrate_pfn_to_page(src[i]);

        err = ops->copy_to_ram(pages, dma_addr, npages);
        if (err)
                goto err_finalize;

err_finalize:
        if (err)
                drm_gpusvm_migration_unlock_put_pages(npages, dst);
        migrate_device_pages(src, dst, npages);
        migrate_device_finalize(src, dst, npages);
        drm_gpusvm_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
                                       DMA_FROM_DEVICE);
err_free:
        kvfree(buf);
err_out:
        mmput_async(devmem_allocation->mm);

        if (completion_done(&devmem_allocation->detached))
                return 0;

        if (retry_count--) {
                cond_resched();
                goto retry;
        }

        return err ?: -EBUSY;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_evict_to_ram);

/**
 * __drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
 * @vas: Pointer to the VM area structure
 * @device_private_page_owner: Device private pages owner
 * @page: Pointer to the page for fault handling (can be NULL)
 * @fault_addr: Fault address
 * @size: Size of migration
 *
 * This internal function performs the migration of the specified GPU SVM range
 * to RAM. It sets up the migration, populates and DMA-maps the RAM PFNs, and
 * invokes the driver-specific operations for migration to RAM.
 *
 * Return: 0 on success, negative error code on failure.
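 *
 * The migrated span is the @size-aligned window around @fault_addr, clamped
 * to the VMA bounds. For example, with @size == SZ_2M and
 * @fault_addr == 0x101000, the window before clamping is
 * [ALIGN_DOWN(0x101000, SZ_2M), ALIGN(0x101001, SZ_2M)) == [0x0, 0x200000).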
 */
static int __drm_gpusvm_migrate_to_ram(struct vm_area_struct *vas,
                                       void *device_private_page_owner,
                                       struct page *page,
                                       unsigned long fault_addr,
                                       unsigned long size)
{
        struct migrate_vma migrate = {
                .vma = vas,
                .pgmap_owner = device_private_page_owner,
                .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
                         MIGRATE_VMA_SELECT_DEVICE_COHERENT,
                .fault_page = page,
        };
        struct drm_gpusvm_zdd *zdd;
        const struct drm_gpusvm_devmem_ops *ops;
        struct device *dev = NULL;
        unsigned long npages, mpages = 0;
        struct page **pages;
        dma_addr_t *dma_addr;
        unsigned long start, end;
        void *buf;
        int i, err = 0;

        start = ALIGN_DOWN(fault_addr, size);
        end = ALIGN(fault_addr + 1, size);

        /* Corner case where the VMA has been partially unmapped */
        if (start < vas->vm_start)
                start = vas->vm_start;
        if (end > vas->vm_end)
                end = vas->vm_end;

        migrate.start = start;
        migrate.end = end;
        npages = npages_in_range(start, end);

        buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
                       sizeof(*pages), GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto err_out;
        }
        dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
        pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;

        migrate.vma = vas;
        migrate.src = buf;
        migrate.dst = migrate.src + npages;

        err = migrate_vma_setup(&migrate);
        if (err)
                goto err_free;

        /* Raced with another CPU fault, nothing to do */
        if (!migrate.cpages)
                goto err_free;

        if (!page) {
                for (i = 0; i < npages; ++i) {
                        if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
                                continue;

                        page = migrate_pfn_to_page(migrate.src[i]);
                        break;
                }

                if (!page)
                        goto err_finalize;
        }
        zdd = page->zone_device_data;
        ops = zdd->devmem_allocation->ops;
        dev = zdd->devmem_allocation->dev;

        err = drm_gpusvm_migrate_populate_ram_pfn(vas, page, npages, &mpages,
                                                  migrate.src, migrate.dst,
                                                  start);
        if (err)
                goto err_finalize;

        err = drm_gpusvm_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
                                           DMA_FROM_DEVICE);
        if (err)
                goto err_finalize;

        for (i = 0; i < npages; ++i)
                pages[i] = migrate_pfn_to_page(migrate.src[i]);

        err = ops->copy_to_ram(pages, dma_addr, npages);
        if (err)
                goto err_finalize;

err_finalize:
        if (err)
                drm_gpusvm_migration_unlock_put_pages(npages, migrate.dst);
        migrate_vma_pages(&migrate);
        migrate_vma_finalize(&migrate);
        if (dev)
                drm_gpusvm_migrate_unmap_pages(dev, dma_addr, npages,
                                               DMA_FROM_DEVICE);
err_free:
        kvfree(buf);
err_out:
        return err;
}

/**
 * drm_gpusvm_range_evict() - Evict GPU SVM range
 * @gpusvm: Pointer to the GPU SVM structure
 * @range: Pointer to the GPU SVM range to be evicted
 *
 * This function evicts the specified GPU SVM range to RAM. Eviction works by
 * faulting the range with HMM_PFN_REQ_FAULT and a NULL dev_private_owner, so
 * any device-private pages backing the range are migrated back to RAM. This
 * function will not evict coherent pages.
 *
 * Return: 0 on success, a negative error code on failure.
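 *
 * A minimal usage sketch; how often to retry on contention is driver policy,
 * and the retry bound MY_EVICT_RETRIES shown here is an assumption, not part
 * of this API::
 *
 *      for (i = 0; i < MY_EVICT_RETRIES; ++i) {
 *              err = drm_gpusvm_range_evict(gpusvm, range);
 *              if (err != -EBUSY && err != -ETIME)
 *                      break;
 *      }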
 */
int drm_gpusvm_range_evict(struct drm_gpusvm *gpusvm,
                           struct drm_gpusvm_range *range)
{
        struct mmu_interval_notifier *notifier = &range->notifier->notifier;
        struct hmm_range hmm_range = {
                .default_flags = HMM_PFN_REQ_FAULT,
                .notifier = notifier,
                .start = drm_gpusvm_range_start(range),
                .end = drm_gpusvm_range_end(range),
                .dev_private_owner = NULL,
        };
        unsigned long timeout =
                jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
        unsigned long *pfns;
        unsigned long npages = npages_in_range(drm_gpusvm_range_start(range),
                                               drm_gpusvm_range_end(range));
        int err = 0;
        struct mm_struct *mm = gpusvm->mm;

        if (!mmget_not_zero(mm))
                return -EFAULT;

        pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL);
        if (!pfns) {
                mmput(mm);      /* Drop the mm reference taken above */
                return -ENOMEM;
        }

        hmm_range.hmm_pfns = pfns;
        while (!time_after(jiffies, timeout)) {
                hmm_range.notifier_seq = mmu_interval_read_begin(notifier);
                if (time_after(jiffies, timeout)) {
                        err = -ETIME;
                        break;
                }

                mmap_read_lock(mm);
                err = hmm_range_fault(&hmm_range);
                mmap_read_unlock(mm);
                if (err != -EBUSY)
                        break;
        }

        kvfree(pfns);
        mmput(mm);

        return err;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_evict);

/**
 * drm_gpusvm_page_free() - Put GPU SVM zone device data associated with a page
 * @page: Pointer to the page
 *
 * This function is a callback used to put the GPU SVM zone device data
 * associated with a page when it is being released.
 */
static void drm_gpusvm_page_free(struct page *page)
{
        drm_gpusvm_zdd_put(page->zone_device_data);
}

/**
 * drm_gpusvm_migrate_to_ram() - Migrate GPU SVM range to RAM (page fault handler)
 * @vmf: Pointer to the fault information structure
 *
 * This function is a page fault handler used to migrate a GPU SVM range to RAM.
 * It retrieves the GPU SVM range information from the faulting page and invokes
 * the internal migration function to migrate the range back to RAM.
 *
 * Return: VM_FAULT_SIGBUS on failure, 0 on success.
 */
static vm_fault_t drm_gpusvm_migrate_to_ram(struct vm_fault *vmf)
{
        struct drm_gpusvm_zdd *zdd = vmf->page->zone_device_data;
        int err;

        err = __drm_gpusvm_migrate_to_ram(vmf->vma,
                                          zdd->device_private_page_owner,
                                          vmf->page, vmf->address,
                                          zdd->devmem_allocation->size);

        return err ? VM_FAULT_SIGBUS : 0;
}

/*
 * drm_gpusvm_pagemap_ops - Device page map operations for GPU SVM
 */
static const struct dev_pagemap_ops drm_gpusvm_pagemap_ops = {
        .page_free = drm_gpusvm_page_free,
        .migrate_to_ram = drm_gpusvm_migrate_to_ram,
};

/**
 * drm_gpusvm_pagemap_ops_get() - Retrieve GPU SVM device page map operations
 *
 * Return: Pointer to the GPU SVM device page map operations structure.
 */
const struct dev_pagemap_ops *drm_gpusvm_pagemap_ops_get(void)
{
        return &drm_gpusvm_pagemap_ops;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_pagemap_ops_get);
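/*
 * Example (informative): a driver is expected to plug these ops into the
 * struct dev_pagemap covering its device memory when creating the pagemap.
 * A minimal sketch, assuming a hypothetical my_devmem object and an already
 * reserved resource @res (neither is part of this API):
 *
 *      my_devmem->pagemap.type = MEMORY_DEVICE_PRIVATE;
 *      my_devmem->pagemap.range.start = res->start;
 *      my_devmem->pagemap.range.end = res->end;
 *      my_devmem->pagemap.nr_range = 1;
 *      my_devmem->pagemap.ops = drm_gpusvm_pagemap_ops_get();
 *      my_devmem->pagemap.owner = gpusvm->device_private_page_owner;
 *      base = devm_memremap_pages(dev, &my_devmem->pagemap);
 */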
/**
 * drm_gpusvm_has_mapping() - Check if GPU SVM has mapping for the given address range
 * @gpusvm: Pointer to the GPU SVM structure
 * @start: Start address
 * @end: End address
 *
 * Return: True if GPU SVM has mapping, False otherwise
 */
bool drm_gpusvm_has_mapping(struct drm_gpusvm *gpusvm, unsigned long start,
                            unsigned long end)
{
        struct drm_gpusvm_notifier *notifier;

        drm_gpusvm_for_each_notifier(notifier, gpusvm, start, end) {
                struct drm_gpusvm_range *range = NULL;

                drm_gpusvm_for_each_range(range, notifier, start, end)
                        return true;
        }

        return false;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_has_mapping);

/**
 * drm_gpusvm_range_set_unmapped() - Mark a GPU SVM range as unmapped
 * @range: Pointer to the GPU SVM range structure.
 * @mmu_range: Pointer to the MMU notifier range structure.
 *
 * This function marks a GPU SVM range as unmapped and sets the partial_unmap
 * flag if the MMU notifier range only partially covers the GPU SVM range.
 */
void drm_gpusvm_range_set_unmapped(struct drm_gpusvm_range *range,
                                   const struct mmu_notifier_range *mmu_range)
{
        lockdep_assert_held_write(&range->gpusvm->notifier_lock);

        range->flags.unmapped = true;
        if (drm_gpusvm_range_start(range) < mmu_range->start ||
            drm_gpusvm_range_end(range) > mmu_range->end)
                range->flags.partial_unmap = true;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_range_set_unmapped);

/**
 * drm_gpusvm_devmem_init() - Initialize a GPU SVM device memory allocation
 * @devmem_allocation: Pointer to the GPU SVM device memory allocation to initialize
 * @dev: Pointer to the device structure to which the device memory allocation belongs
 * @mm: Pointer to the mm_struct for the address space
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap we're allocating from.
 * @size: Size of device memory allocation
 */
void drm_gpusvm_devmem_init(struct drm_gpusvm_devmem *devmem_allocation,
                            struct device *dev, struct mm_struct *mm,
                            const struct drm_gpusvm_devmem_ops *ops,
                            struct drm_pagemap *dpagemap, size_t size)
{
        init_completion(&devmem_allocation->detached);
        devmem_allocation->dev = dev;
        devmem_allocation->mm = mm;
        devmem_allocation->ops = ops;
        devmem_allocation->dpagemap = dpagemap;
        devmem_allocation->size = size;
}
EXPORT_SYMBOL_GPL(drm_gpusvm_devmem_init);

MODULE_DESCRIPTION("DRM GPUSVM");
MODULE_LICENSE("GPL");