1 // SPDX-License-Identifier: GPL-2.0-only OR MIT
2 /*
3 * Copyright © 2024-2025 Intel Corporation
4 */
5
6 #include <linux/dma-fence.h>
7 #include <linux/dma-mapping.h>
8 #include <linux/migrate.h>
9 #include <linux/pagemap.h>
10 #include <drm/drm_drv.h>
11 #include <drm/drm_pagemap.h>
12 #include <drm/drm_pagemap_util.h>
13 #include <drm/drm_print.h>
14
15 /**
16 * DOC: Overview
17 *
18 * The DRM pagemap layer is intended to augment the dev_pagemap functionality by
19 * providing a way to populate a struct mm_struct virtual range with device
20 * private pages and to provide helpers to abstract device memory allocations,
21 * to migrate memory back and forth between device memory and system RAM and
22 * to handle access (and in the future migration) between devices implementing
23 * a fast interconnect that is not necessarily visible to the rest of the
24 * system.
25 *
26 * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
27 * instances to populate struct mm_struct virtual ranges with memory, and the
28 * migration is best effort only and may thus fail. The implementation should
29 * also handle device unbinding by blocking new population requests (returning
30 * an -ENODEV error) and thereafter migrating all device pages to system RAM.
31 */
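
/**
 * DOC: Example: Handling device unbinding
 *
 * A minimal, hypothetical sketch of how a driver-side populate_mm()
 * callback could reject new population requests once the device has been
 * unbound, as described above. The my_device structure, its unbound flag,
 * to_my_device() and my_migrate_to_vram() are illustrative only. ::
 *
 *	static int my_populate_mm(struct drm_pagemap *dpagemap,
 *				  unsigned long start, unsigned long end,
 *				  struct mm_struct *mm,
 *				  unsigned long timeslice_ms)
 *	{
 *		struct my_device *mydev = to_my_device(dpagemap->drm);
 *
 *		if (READ_ONCE(mydev->unbound))
 *			return -ENODEV;
 *
 *		return my_migrate_to_vram(mydev, mm, start, end, timeslice_ms);
 *	}
 */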
32
33 /**
34 * DOC: Migration
35 *
36 * Migration granularity typically follows the GPU SVM range requests. If
37 * there are clashes, because of races or because multiple GPU SVM instances
38 * have different views of the ranges in use, parts of a requested range may
39 * already be present in the requested device memory. In that case the
40 * implementation has a variety of options: it can fail, it can populate only
41 * the part of the range that isn't already in device memory, or it can evict
42 * the range to system memory before trying to migrate. Ideally an
43 * implementation would just migrate the missing part of the range and
44 * allocate just enough memory to do so.
45 *
46 * When migrating to system memory in response to a CPU fault or a device
47 * memory eviction request, currently a full device memory allocation is
48 * migrated back to system memory. Moving forward this might need improvement for
49 * situations where a single page needs bouncing between system memory and
50 * device memory due to, for example, atomic operations.
51 *
52 * Key DRM pagemap components:
53 *
54 * - Device Memory Allocations:
55 * Embedded structure containing enough information for the drm_pagemap to
56 * migrate to / from device memory.
57 *
58 * - Device Memory Operations:
59 * Define the interface for driver-specific device memory operations:
60 * releasing memory, populating pfns, and copying to / from device memory.
61 */
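
/**
 * DOC: Example: Device memory operations table
 *
 * A hypothetical driver wires up the device memory operations described
 * above through a struct drm_pagemap_devmem_ops table. The callback names
 * below are placeholders; only the ops member names belong to this
 * interface. ::
 *
 *	static const struct drm_pagemap_devmem_ops my_devmem_ops = {
 *		.devmem_release = my_devmem_release,
 *		.populate_devmem_pfn = my_populate_devmem_pfn,
 *		.copy_to_devmem = my_copy_to_devmem,
 *		.copy_to_ram = my_copy_to_ram,
 *	};
 */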
62
63 /**
64 * struct drm_pagemap_zdd - GPU SVM zone device data
65 *
66 * @refcount: Reference count for the zdd
67 * @devmem_allocation: device memory allocation
68 * @dpagemap: Refcounted pointer to the underlying struct drm_pagemap.
69 *
70 * This structure serves as a generic wrapper installed in
71 * page->zone_device_data. It provides infrastructure for looking up a device
72 * memory allocation upon CPU page fault and asynchronously releasing device
73 * memory once the CPU has no page references. Asynchronous release is useful
74 * because CPU page references can be dropped in IRQ contexts, while releasing
75 * device memory likely requires sleeping locks.
76 */
77 struct drm_pagemap_zdd {
78 struct kref refcount;
79 struct drm_pagemap_devmem *devmem_allocation;
80 struct drm_pagemap *dpagemap;
81 };
82
83 /**
84 * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
85 * @dpagemap: Pointer to the underlying struct drm_pagemap.
86 *
87 * This function allocates and initializes a new zdd structure. It sets up the
88 * reference count and takes a reference on @dpagemap.
89 *
90 * Return: Pointer to the allocated zdd on success, NULL on failure.
91 */
92 static struct drm_pagemap_zdd *
93 drm_pagemap_zdd_alloc(struct drm_pagemap *dpagemap)
94 {
95 struct drm_pagemap_zdd *zdd;
96
97 zdd = kmalloc_obj(*zdd);
98 if (!zdd)
99 return NULL;
100
101 kref_init(&zdd->refcount);
102 zdd->devmem_allocation = NULL;
103 zdd->dpagemap = drm_pagemap_get(dpagemap);
104
105 return zdd;
106 }
107
108 /**
109 * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
110 * @zdd: Pointer to the zdd structure.
111 *
112 * This function increments the reference count of the provided zdd structure.
113 *
114 * Return: Pointer to the zdd structure.
115 */
116 static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
117 {
118 kref_get(&zdd->refcount);
119 return zdd;
120 }
121
122 /**
123 * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
124 * @ref: Pointer to the reference count structure.
125 *
126 * This function releases the device memory allocation, if any, and frees the zdd.
127 */
128 static void drm_pagemap_zdd_destroy(struct kref *ref)
129 {
130 struct drm_pagemap_zdd *zdd =
131 container_of(ref, struct drm_pagemap_zdd, refcount);
132 struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;
133 struct drm_pagemap *dpagemap = zdd->dpagemap;
134
135 if (devmem) {
136 complete_all(&devmem->detached);
137 if (devmem->ops->devmem_release)
138 devmem->ops->devmem_release(devmem);
139 }
140 kfree(zdd);
141 drm_pagemap_put(dpagemap);
142 }
143
144 /**
145 * drm_pagemap_zdd_put() - Put a zdd reference.
146 * @zdd: Pointer to the zdd structure.
147 *
148 * This function decrements the reference count of the provided zdd structure
149 * and destroys it if the count drops to zero.
150 */
151 static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
152 {
153 kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
154 }
155
156 /**
157 * drm_pagemap_migration_unlock_put_folio() - Put a migration folio
158 * @folio: Pointer to the folio to put
159 *
160 * This function unlocks and puts a folio.
161 */
162 static void drm_pagemap_migration_unlock_put_folio(struct folio *folio)
163 {
164 folio_unlock(folio);
165 folio_put(folio);
166 }
167
168 /**
169 * drm_pagemap_migration_unlock_put_pages() - Put migration pages
170 * @npages: Number of pages
171 * @migrate_pfn: Array of migrate page frame numbers
172 *
173 * This function unlocks and puts an array of pages.
174 */
175 static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
176 unsigned long *migrate_pfn)
177 {
178 unsigned long i;
179
180 for (i = 0; i < npages;) {
181 struct page *page;
182 struct folio *folio;
183 unsigned int order = 0;
184
185 if (!migrate_pfn[i])
186 goto next;
187
188 page = migrate_pfn_to_page(migrate_pfn[i]);
189 folio = page_folio(page);
190 order = folio_order(folio);
191
192 drm_pagemap_migration_unlock_put_folio(folio);
193 migrate_pfn[i] = 0;
194
195 next:
196 i += NR_PAGES(order);
197 }
198 }
199
200 /**
201 * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
202 * @page: Pointer to the page
203 * @order: Order
204 * @zdd: Pointer to the GPU SVM zone device data
205 *
206 * This function associates the given page with the specified GPU SVM zone
207 * device data and initializes it for zone device usage.
208 */
209 static void drm_pagemap_get_devmem_page(struct page *page,
210 unsigned int order,
211 struct drm_pagemap_zdd *zdd)
212 {
213 zone_device_folio_init((struct folio *)page, zdd->dpagemap->pagemap,
214 order);
215 folio_set_zone_device_data(page_folio(page), drm_pagemap_zdd_get(zdd));
216 }
217
218 /**
219 * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
220 * @dev: The device performing the migration.
221 * @local_dpagemap: The drm_pagemap local to the migrating device.
222 * @pagemap_addr: Array to store DMA information corresponding to mapped pages.
223 * @migrate_pfn: Array of page frame numbers of system pages or peer pages to map.
224 * @npages: Number of system pages or peer pages to map.
225 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
226 * @mdetails: Details governing the migration behaviour.
227 *
228 * This function maps pages of memory for migration usage in GPU SVM. It
229 * iterates over each page frame number provided in @migrate_pfn, maps the
230 * corresponding page, and stores the DMA information in the provided
231 * @pagemap_addr array.
232 *
233 * Returns: 0 on success, -EFAULT if an error occurs during mapping.
234 */
235 static int drm_pagemap_migrate_map_pages(struct device *dev,
236 struct drm_pagemap *local_dpagemap,
237 struct drm_pagemap_addr *pagemap_addr,
238 unsigned long *migrate_pfn,
239 unsigned long npages,
240 enum dma_data_direction dir,
241 const struct drm_pagemap_migrate_details *mdetails)
242 {
243 unsigned long num_peer_pages = 0, num_local_pages = 0, i;
244
245 for (i = 0; i < npages;) {
246 struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
247 dma_addr_t dma_addr;
248 struct folio *folio;
249 unsigned int order = 0;
250
251 if (!page)
252 goto next;
253
254 folio = page_folio(page);
255 order = folio_order(folio);
256
257 if (is_device_private_page(page)) {
258 struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
259 struct drm_pagemap *dpagemap = zdd->dpagemap;
260 struct drm_pagemap_addr addr;
261
262 if (dpagemap == local_dpagemap) {
263 if (!mdetails->can_migrate_same_pagemap)
264 goto next;
265
266 num_local_pages += NR_PAGES(order);
267 } else {
268 num_peer_pages += NR_PAGES(order);
269 }
270
271 addr = dpagemap->ops->device_map(dpagemap, dev, page, order, dir);
272 if (dma_mapping_error(dev, addr.addr))
273 return -EFAULT;
274
275 pagemap_addr[i] = addr;
276 } else {
277 dma_addr = dma_map_page(dev, page, 0, page_size(page), dir);
278 if (dma_mapping_error(dev, dma_addr))
279 return -EFAULT;
280
281 pagemap_addr[i] =
282 drm_pagemap_addr_encode(dma_addr,
283 DRM_INTERCONNECT_SYSTEM,
284 order, dir);
285 }
286
287 next:
288 i += NR_PAGES(order);
289 }
290
291 if (num_peer_pages)
292 drm_dbg(local_dpagemap->drm, "Migrating %lu peer pages over interconnect.\n",
293 num_peer_pages);
294 if (num_local_pages)
295 drm_dbg(local_dpagemap->drm, "Migrating %lu pages within local device memory.\n",
296 num_local_pages);
297
298 return 0;
299 }
300
301 /**
302 * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
303 * @dev: The device for which the pages were mapped
304 * @migrate_pfn: Array of migrate pfns set up for the mapped pages. Used to
305 * determine the drm_pagemap of a peer device private page.
306 * @pagemap_addr: Array of DMA information corresponding to mapped pages
307 * @npages: Number of pages to unmap
308 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
309 *
310 * This function unmaps previously mapped pages of memory for GPU Shared Virtual
311 * Memory (SVM). It iterates over each DMA address provided in @pagemap_addr, checks
312 * if it's valid and not already unmapped, and unmaps the corresponding page.
313 */
314 static void drm_pagemap_migrate_unmap_pages(struct device *dev,
315 struct drm_pagemap_addr *pagemap_addr,
316 unsigned long *migrate_pfn,
317 unsigned long npages,
318 enum dma_data_direction dir)
319 {
320 unsigned long i;
321
322 for (i = 0; i < npages;) {
323 struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
324
325 if (!page || !pagemap_addr[i].addr || dma_mapping_error(dev, pagemap_addr[i].addr))
326 goto next;
327
328 if (is_zone_device_page(page)) {
329 struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
330 struct drm_pagemap *dpagemap = zdd->dpagemap;
331
332 dpagemap->ops->device_unmap(dpagemap, dev, &pagemap_addr[i]);
333 } else {
334 dma_unmap_page(dev, pagemap_addr[i].addr,
335 PAGE_SIZE << pagemap_addr[i].order, dir);
336 }
337
338 next:
339 i += NR_PAGES(pagemap_addr[i].order);
340 }
341 }
342
343 static unsigned long
344 npages_in_range(unsigned long start, unsigned long end)
345 {
346 return (end - start) >> PAGE_SHIFT;
347 }
348
349 static int
350 drm_pagemap_migrate_remote_to_local(struct drm_pagemap_devmem *devmem,
351 struct device *remote_device,
352 struct drm_pagemap *remote_dpagemap,
353 unsigned long local_pfns[],
354 struct page *remote_pages[],
355 struct drm_pagemap_addr pagemap_addr[],
356 unsigned long npages,
357 const struct drm_pagemap_devmem_ops *ops,
358 const struct drm_pagemap_migrate_details *mdetails)
360 {
361 int err = drm_pagemap_migrate_map_pages(remote_device, remote_dpagemap,
362 pagemap_addr, local_pfns,
363 npages, DMA_FROM_DEVICE, mdetails);
364
365 if (err)
366 goto out;
367
368 err = ops->copy_to_ram(remote_pages, pagemap_addr, npages,
369 devmem->pre_migrate_fence);
370 out:
371 drm_pagemap_migrate_unmap_pages(remote_device, pagemap_addr, local_pfns,
372 npages, DMA_FROM_DEVICE);
373 return err;
374 }
375
376 static int
377 drm_pagemap_migrate_sys_to_dev(struct drm_pagemap_devmem *devmem,
378 unsigned long sys_pfns[],
379 struct page *local_pages[],
380 struct drm_pagemap_addr pagemap_addr[],
381 unsigned long npages,
382 const struct drm_pagemap_devmem_ops *ops,
383 const struct drm_pagemap_migrate_details *mdetails)
384 {
385 int err = drm_pagemap_migrate_map_pages(devmem->dev, devmem->dpagemap,
386 pagemap_addr, sys_pfns, npages,
387 DMA_TO_DEVICE, mdetails);
388
389 if (err)
390 goto out;
391
392 err = ops->copy_to_devmem(local_pages, pagemap_addr, npages,
393 devmem->pre_migrate_fence);
394 out:
395 drm_pagemap_migrate_unmap_pages(devmem->dev, pagemap_addr, sys_pfns, npages,
396 DMA_TO_DEVICE);
397 return err;
398 }
399
400 /**
401 * struct migrate_range_loc - Cursor into the loop over migrate_pfns for migrating to
402 * device.
403 * @start: The current loop index.
404 * @device: migrating device.
405 * @dpagemap: Pointer to struct drm_pagemap used by the migrating device.
406 * @ops: The copy ops to be used for the migrating device.
407 */
408 struct migrate_range_loc {
409 unsigned long start;
410 struct device *device;
411 struct drm_pagemap *dpagemap;
412 const struct drm_pagemap_devmem_ops *ops;
413 };
414
415 static int drm_pagemap_migrate_range(struct drm_pagemap_devmem *devmem,
416 unsigned long src_pfns[],
417 unsigned long dst_pfns[],
418 struct page *pages[],
419 struct drm_pagemap_addr pagemap_addr[],
420 struct migrate_range_loc *last,
421 const struct migrate_range_loc *cur,
422 const struct drm_pagemap_migrate_details *mdetails)
423 {
424 int ret = 0;
425
426 if (cur->start == 0)
427 goto out;
428
429 if (cur->start <= last->start)
430 return 0;
431
432 if (cur->dpagemap == last->dpagemap && cur->ops == last->ops)
433 return 0;
434
435 if (last->dpagemap)
436 ret = drm_pagemap_migrate_remote_to_local(devmem,
437 last->device,
438 last->dpagemap,
439 &dst_pfns[last->start],
440 &pages[last->start],
441 &pagemap_addr[last->start],
442 cur->start - last->start,
443 last->ops, mdetails);
445 else
446 ret = drm_pagemap_migrate_sys_to_dev(devmem,
447 &src_pfns[last->start],
448 &pages[last->start],
449 &pagemap_addr[last->start],
450 cur->start - last->start,
451 last->ops, mdetails);
452
453 out:
454 *last = *cur;
455 return ret;
456 }
457
458 /**
459 * drm_pagemap_cpages() - Count collected pages
460 * @migrate_pfn: Array of migrate_pfn entries to account
461 * @npages: Number of entries in @migrate_pfn
462 *
463 * Compute the total number of minimum-sized pages represented by the
464 * collected entries in @migrate_pfn. The total is derived from the
465 * order encoded in each entry.
466 *
467 * Return: Total number of minimum-sized pages.
468 */
469 static int drm_pagemap_cpages(unsigned long *migrate_pfn, unsigned long npages)
470 {
471 unsigned long i, cpages = 0;
472
473 for (i = 0; i < npages;) {
474 struct page *page = migrate_pfn_to_page(migrate_pfn[i]);
475 struct folio *folio;
476 unsigned int order = 0;
477
478 if (page) {
479 folio = page_folio(page);
480 order = folio_order(folio);
481 cpages += NR_PAGES(order);
482 } else if (migrate_pfn[i] & MIGRATE_PFN_COMPOUND) {
483 order = HPAGE_PMD_ORDER;
484 cpages += NR_PAGES(order);
485 }
486
487 i += NR_PAGES(order);
488 }
489
490 return cpages;
491 }
492
493 /**
494 * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
495 * @devmem_allocation: The device memory allocation to migrate to.
496 * The caller should hold a reference to the device memory allocation,
497 * and the reference is consumed by this function even if it returns with
498 * an error.
499 * @mm: Pointer to the struct mm_struct.
500 * @start: Start of the virtual address range to migrate.
501 * @end: End of the virtual address range to migrate.
502 * @mdetails: Details to govern the migration.
503 *
504 * This function migrates the specified virtual address range to device memory.
505 * It performs the necessary setup and invokes the driver-specific operations for
506 * migration to device memory. Expected to be called while holding the mmap lock in
507 * at least read mode.
508 *
509 * Note: The timeslice_ms field of @mdetails can typically be used to force data to
510 * remain in pagemap pages long enough for a GPU to perform a task and to prevent
511 * a migration livelock. One alternative would be for the GPU driver to block
512 * in a mmu_notifier for the specified amount of time, but adding the
513 * functionality to the pagemap is likely nicer to the system as a whole.
514 *
515 * Return: %0 on success, negative error code on failure.
516 */
517 int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
518 struct mm_struct *mm,
519 unsigned long start, unsigned long end,
520 const struct drm_pagemap_migrate_details *mdetails)
521 {
522 const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
523 struct drm_pagemap *dpagemap = devmem_allocation->dpagemap;
524 struct dev_pagemap *pagemap = dpagemap->pagemap;
525 struct migrate_vma migrate = {
526 .start = start,
527 .end = end,
528 .pgmap_owner = pagemap->owner,
529 .flags = MIGRATE_VMA_SELECT_SYSTEM | MIGRATE_VMA_SELECT_DEVICE_COHERENT |
530 MIGRATE_VMA_SELECT_DEVICE_PRIVATE | MIGRATE_VMA_SELECT_COMPOUND,
531 };
532 unsigned long i, npages = npages_in_range(start, end);
533 unsigned long own_pages = 0, migrated_pages = 0;
534 struct migrate_range_loc cur, last = {.device = dpagemap->drm->dev, .ops = ops};
535 struct vm_area_struct *vas;
536 struct drm_pagemap_zdd *zdd = NULL;
537 struct page **pages;
538 struct drm_pagemap_addr *pagemap_addr;
539 void *buf;
540 int err;
541
542 mmap_assert_locked(mm);
543
544 if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
545 !ops->copy_to_ram)
546 return -EOPNOTSUPP;
547
548 vas = vma_lookup(mm, start);
549 if (!vas) {
550 err = -ENOENT;
551 goto err_out;
552 }
553
554 if (end > vas->vm_end || start < vas->vm_start) {
555 err = -EINVAL;
556 goto err_out;
557 }
558
559 if (!vma_is_anonymous(vas)) {
560 err = -EBUSY;
561 goto err_out;
562 }
563
564 buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
565 sizeof(*pages), GFP_KERNEL);
566 if (!buf) {
567 err = -ENOMEM;
568 goto err_out;
569 }
570 pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
571 pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
572
573 zdd = drm_pagemap_zdd_alloc(dpagemap);
574 if (!zdd) {
575 err = -ENOMEM;
576 kvfree(buf);
577 goto err_out;
578 }
579 zdd->devmem_allocation = devmem_allocation; /* Owns ref */
580
581 migrate.vma = vas;
582 migrate.src = buf;
583 migrate.dst = migrate.src + npages;
584
585 err = migrate_vma_setup(&migrate);
586 if (err)
587 goto err_free;
588
589 if (!migrate.cpages) {
590 /* No pages to migrate. Raced or unknown device pages. */
591 err = -EBUSY;
592 goto err_free;
593 }
594
595 if (migrate.cpages != npages &&
596 drm_pagemap_cpages(migrate.src, npages) != npages) {
597 /*
598 * Some pages to migrate. But we want to migrate all or
599 * nothing. Raced or unknown device pages.
600 */
601 err = -EBUSY;
602 goto err_aborted_migration;
603 }
604
605 /* Count device-private pages to migrate */
606 for (i = 0; i < npages;) {
607 struct page *src_page = migrate_pfn_to_page(migrate.src[i]);
608 unsigned long nr_pages = src_page ? NR_PAGES(folio_order(page_folio(src_page))) : 1;
609
610 if (src_page && is_zone_device_page(src_page)) {
611 if (page_pgmap(src_page) == pagemap)
612 own_pages += nr_pages;
613 }
614
615 i += nr_pages;
616 }
617
618 drm_dbg(dpagemap->drm, "Total pages %lu; Own pages: %lu.\n",
619 npages, own_pages);
620 if (own_pages == npages) {
621 err = 0;
622 drm_dbg(dpagemap->drm, "Migration wasn't necessary.\n");
623 goto err_aborted_migration;
624 } else if (own_pages && !mdetails->can_migrate_same_pagemap) {
625 err = -EBUSY;
626 drm_dbg(dpagemap->drm, "Migration aborted due to fragmentation.\n");
627 goto err_aborted_migration;
628 }
629
630 err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
631 if (err)
632 goto err_aborted_migration;
633
634 own_pages = 0;
635
636 for (i = 0; i < npages;) {
637 unsigned long j;
638 struct page *page = pfn_to_page(migrate.dst[i]);
639 struct page *src_page = migrate_pfn_to_page(migrate.src[i]);
640 unsigned int order = 0;
641
642 cur.start = i;
643 pages[i] = NULL;
644 if (src_page && is_device_private_page(src_page)) {
645 struct drm_pagemap_zdd *src_zdd =
646 drm_pagemap_page_zone_device_data(src_page);
647
648 if (page_pgmap(src_page) == pagemap &&
649 !mdetails->can_migrate_same_pagemap) {
650 migrate.dst[i] = 0;
651 own_pages++;
652 goto next;
653 }
654 if (mdetails->source_peer_migrates) {
655 cur.dpagemap = src_zdd->dpagemap;
656 cur.ops = src_zdd->devmem_allocation->ops;
657 cur.device = cur.dpagemap->drm->dev;
658 pages[i] = src_page;
659 }
660 }
661 if (!pages[i]) {
662 cur.dpagemap = NULL;
663 cur.ops = ops;
664 cur.device = dpagemap->drm->dev;
665 pages[i] = page;
666 }
667 migrate.dst[i] = migrate_pfn(migrate.dst[i]);
668
669 if (migrate.src[i] & MIGRATE_PFN_COMPOUND) {
670 drm_WARN_ONCE(dpagemap->drm, src_page &&
671 folio_order(page_folio(src_page)) != HPAGE_PMD_ORDER,
672 "Unexpected folio order\n");
673
674 order = HPAGE_PMD_ORDER;
675 migrate.dst[i] |= MIGRATE_PFN_COMPOUND;
676
677 for (j = 1; j < NR_PAGES(order) && i + j < npages; j++)
678 migrate.dst[i + j] = 0;
679 }
680
681 drm_pagemap_get_devmem_page(page, order, zdd);
682
683 /* If we switched the migrating drm_pagemap, migrate previous pages now */
684 err = drm_pagemap_migrate_range(devmem_allocation, migrate.src, migrate.dst,
685 pages, pagemap_addr, &last, &cur,
686 mdetails);
687 if (err) {
688 npages = i + 1;
689 goto err_finalize;
690 }
691
692 next:
693 i += NR_PAGES(order);
694 }
695
696 cur.start = npages;
697 cur.ops = NULL; /* Force migration */
698 err = drm_pagemap_migrate_range(devmem_allocation, migrate.src, migrate.dst,
699 pages, pagemap_addr, &last, &cur, mdetails);
700 if (err)
701 goto err_finalize;
702
703 drm_WARN_ON(dpagemap->drm, !!own_pages);
704
705 dma_fence_put(devmem_allocation->pre_migrate_fence);
706 devmem_allocation->pre_migrate_fence = NULL;
707
708 /* Upon success, set the timeslice for the newly migrated pages */
709 devmem_allocation->timeslice_expiration = get_jiffies_64() +
710 msecs_to_jiffies(mdetails->timeslice_ms);
711
712 err_finalize:
713 if (err)
714 drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
715 err_aborted_migration:
716 migrate_vma_pages(&migrate);
717
718 for (i = 0; !err && i < npages;) {
719 struct page *page = migrate_pfn_to_page(migrate.src[i]);
720 unsigned long nr_pages = page ? NR_PAGES(folio_order(page_folio(page))) : 1;
721
722 if (migrate.src[i] & MIGRATE_PFN_MIGRATE)
723 migrated_pages += nr_pages;
724
725 i += nr_pages;
726 }
727
728 if (!err && migrated_pages < npages - own_pages) {
729 drm_dbg(dpagemap->drm, "Raced while finalizing migration.\n");
730 err = -EBUSY;
731 }
732
733 migrate_vma_finalize(&migrate);
734 err_free:
735 drm_pagemap_zdd_put(zdd);
736 kvfree(buf);
737 return err;
738
739 err_out:
740 devmem_allocation->ops->devmem_release(devmem_allocation);
741 return err;
742 }
743 EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
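
/*
 * Example (illustrative only): migrating a CPU virtual range to device
 * memory from a driver SVM path. The devmem allocation and the mm come
 * from the driver; note that the reference on the allocation is consumed
 * by drm_pagemap_migrate_to_devmem() even on error, and that the mmap
 * lock must be held in at least read mode across the call.
 *
 *	struct drm_pagemap_migrate_details mdetails = {
 *		.timeslice_ms = 5,
 *	};
 *	int err;
 *
 *	mmap_read_lock(mm);
 *	err = drm_pagemap_migrate_to_devmem(devmem, mm, start, end, &mdetails);
 *	mmap_read_unlock(mm);
 */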
744
745 /**
746 * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
747 * @vas: Pointer to the VM area structure, can be NULL
748 * @fault_page: Fault page
749 * @npages: Number of pages to populate
750 * @mpages: Pointer to the number of pages successfully populated (output)
751 * @src_mpfn: Source array of migrate PFNs
752 * @mpfn: Array of migrate PFNs to populate
753 * @addr: Start address for PFN allocation
754 *
755 * This function populates the RAM migrate page frame numbers (PFNs) for the
756 * specified VM area structure. It allocates and locks pages in the VM area for
757 * RAM usage. If @vas is non-NULL, vma_alloc_folio() is used for allocation;
758 * otherwise folio_alloc() is used.
759 *
760 * Return: 0 on success, negative error code on failure.
761 */
762 static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
763 struct page *fault_page,
764 unsigned long npages,
765 unsigned long *mpages,
766 unsigned long *src_mpfn,
767 unsigned long *mpfn,
768 unsigned long addr)
769 {
770 unsigned long i;
771
772 for (i = 0; i < npages;) {
773 struct page *page = NULL, *src_page;
774 struct folio *folio;
775 unsigned int order = 0;
776
777 if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
778 goto next;
779
780 src_page = migrate_pfn_to_page(src_mpfn[i]);
781 if (!src_page)
782 goto next;
783
784 if (fault_page) {
785 if (drm_pagemap_page_zone_device_data(src_page) !=
786 drm_pagemap_page_zone_device_data(fault_page))
787 goto next;
788 }
789
790 order = folio_order(page_folio(src_page));
791
792 /* TODO: Support fallback to single pages if THP allocation fails */
793 if (vas)
794 folio = vma_alloc_folio(GFP_HIGHUSER, order, vas, addr);
795 else
796 folio = folio_alloc(GFP_HIGHUSER, order);
797
798 if (!folio)
799 goto free_pages;
800
801 page = folio_page(folio, 0);
802 mpfn[i] = migrate_pfn(page_to_pfn(page));
803
804 if (order)
805 mpfn[i] |= MIGRATE_PFN_COMPOUND;
806 next:
807 if (page)
808 addr += page_size(page);
809 else
810 addr += PAGE_SIZE;
811
812 i += NR_PAGES(order);
813 }
814
815 for (i = 0; i < npages;) {
816 struct page *page = migrate_pfn_to_page(mpfn[i]);
817 unsigned int order = 0;
818
819 if (!page)
820 goto next_lock;
821
822 WARN_ON_ONCE(!folio_trylock(page_folio(page)));
823
824 order = folio_order(page_folio(page));
825 *mpages += NR_PAGES(order);
826
827 next_lock:
828 i += NR_PAGES(order);
829 }
830
831 return 0;
832
833 free_pages:
834 for (i = 0; i < npages;) {
835 struct page *page = migrate_pfn_to_page(mpfn[i]);
836 unsigned int order = 0;
837
838 if (!page)
839 goto next_put;
840
841 order = folio_order(page_folio(page));
842 put_page(page);
843 mpfn[i] = 0;
844
845
846 next_put:
847 i += NR_PAGES(order);
848 }
849 return -ENOMEM;
850 }
851
852 static void drm_pagemap_dev_unhold_work(struct work_struct *work);
853 static LLIST_HEAD(drm_pagemap_unhold_list);
854 static DECLARE_WORK(drm_pagemap_work, drm_pagemap_dev_unhold_work);
855
856 /**
857 * struct drm_pagemap_dev_hold - Struct to aid in drm_device release.
858 * @link: Link into drm_pagemap_unhold_list for deferred reference releases.
859 * @drm: drm device to put.
860 *
861 * When a struct drm_pagemap is released, we also need to release the
862 * reference it holds on the drm device. However, that typically needs
863 * to be deferred and done from a system-wide workqueue.
864 * Each time a struct drm_pagemap is initialized
865 * (or re-initialized if cached) we therefore allocate a separate
866 * drm_pagemap_dev_hold item, from which we put the drm device and
867 * the associated module.
868 */
869 struct drm_pagemap_dev_hold {
870 struct llist_node link;
871 struct drm_device *drm;
872 };
873
874 static void drm_pagemap_release(struct kref *ref)
875 {
876 struct drm_pagemap *dpagemap = container_of(ref, typeof(*dpagemap), ref);
877 struct drm_pagemap_dev_hold *dev_hold = dpagemap->dev_hold;
878
879 /*
880 * We know the pagemap provider is alive at this point, since
881 * the struct drm_pagemap_dev_hold holds a reference to the
882 * pagemap provider drm_device and its module.
883 */
884 dpagemap->dev_hold = NULL;
885 drm_pagemap_shrinker_add(dpagemap);
886 llist_add(&dev_hold->link, &drm_pagemap_unhold_list);
887 schedule_work(&drm_pagemap_work);
888 /*
889 * Here, either the provider device is still alive, since if called from
890 * page_free(), the caller is holding a reference on the dev_pagemap,
891 * or if called from drm_pagemap_put(), the direct caller is still alive.
892 * This ensures we can't race with THIS module unload.
893 */
894 }
895
896 static void drm_pagemap_dev_unhold_work(struct work_struct *work)
897 {
898 struct llist_node *node = llist_del_all(&drm_pagemap_unhold_list);
899 struct drm_pagemap_dev_hold *dev_hold, *next;
900
901 /*
902 * Deferred release of drm_pagemap provider device and module.
903 * THIS module is kept alive during the release by the
904 * flush_work() in the drm_pagemap_exit() function.
905 */
906 llist_for_each_entry_safe(dev_hold, next, node, link) {
907 struct drm_device *drm = dev_hold->drm;
908 struct module *module = drm->driver->fops->owner;
909
910 drm_dbg(drm, "Releasing reference on provider device and module.\n");
911 drm_dev_put(drm);
912 module_put(module);
913 kfree(dev_hold);
914 }
915 }
916
917 static struct drm_pagemap_dev_hold *
918 drm_pagemap_dev_hold(struct drm_pagemap *dpagemap)
919 {
920 struct drm_pagemap_dev_hold *dev_hold;
921 struct drm_device *drm = dpagemap->drm;
922
923 dev_hold = kzalloc_obj(*dev_hold);
924 if (!dev_hold)
925 return ERR_PTR(-ENOMEM);
926
927 init_llist_node(&dev_hold->link);
928 dev_hold->drm = drm;
929 (void)try_module_get(drm->driver->fops->owner);
930 drm_dev_get(drm);
931
932 return dev_hold;
933 }
934
935 /**
936 * drm_pagemap_reinit() - Reinitialize a drm_pagemap
937 * @dpagemap: The drm_pagemap to reinitialize
938 *
939 * Reinitialize a drm_pagemap, for which drm_pagemap_release
940 * has already been called. This interface is intended for the
941 * situation where the driver caches a destroyed drm_pagemap.
942 *
943 * Return: 0 on success, negative error code on failure.
944 */
945 int drm_pagemap_reinit(struct drm_pagemap *dpagemap)
946 {
947 dpagemap->dev_hold = drm_pagemap_dev_hold(dpagemap);
948 if (IS_ERR(dpagemap->dev_hold))
949 return PTR_ERR(dpagemap->dev_hold);
950
951 kref_init(&dpagemap->ref);
952 return 0;
953 }
954 EXPORT_SYMBOL(drm_pagemap_reinit);
955
956 /**
957 * drm_pagemap_init() - Initialize a pre-allocated drm_pagemap
958 * @dpagemap: The drm_pagemap to initialize.
959 * @pagemap: The associated dev_pagemap providing the device
960 * private pages.
961 * @drm: The drm device. The drm_pagemap holds a reference on the
962 * drm_device and the module owning the drm_device until
963 * drm_pagemap_release(). This facilitates drm_pagemap exporting.
964 * @ops: The drm_pagemap ops.
965 *
966 * Initialize and take an initial reference on a drm_pagemap.
967 * After successful return, use drm_pagemap_put() to destroy.
968 *
969 * Return: 0 on success, negative error code on error.
970 */
971 int drm_pagemap_init(struct drm_pagemap *dpagemap,
972 struct dev_pagemap *pagemap,
973 struct drm_device *drm,
974 const struct drm_pagemap_ops *ops)
975 {
976 kref_init(&dpagemap->ref);
977 dpagemap->ops = ops;
978 dpagemap->pagemap = pagemap;
979 dpagemap->drm = drm;
980 dpagemap->cache = NULL;
981 INIT_LIST_HEAD(&dpagemap->shrink_link);
982
983 return drm_pagemap_reinit(dpagemap);
984 }
985 EXPORT_SYMBOL(drm_pagemap_init);
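
/*
 * Example (illustrative only): a driver embedding a struct drm_pagemap and
 * initializing it once the backing dev_pagemap has been set up. The
 * containing my_device structure and my_dpagemap_ops are hypothetical.
 *
 *	err = drm_pagemap_init(&mydev->dpagemap, &mydev->pagemap,
 *			       &mydev->drm, &my_dpagemap_ops);
 *	if (err)
 *		return err;
 *
 *	... use the pagemap, then drop the final reference with ...
 *	drm_pagemap_put(&mydev->dpagemap);
 */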
986
987 /**
988 * drm_pagemap_put() - Put a struct drm_pagemap reference
989 * @dpagemap: Pointer to a struct drm_pagemap object.
990 *
991 * Puts a struct drm_pagemap reference and frees the drm_pagemap object
992 * if the refcount reaches zero.
993 */
994 void drm_pagemap_put(struct drm_pagemap *dpagemap)
995 {
996 if (likely(dpagemap)) {
997 drm_pagemap_shrinker_might_lock(dpagemap);
998 kref_put(&dpagemap->ref, drm_pagemap_release);
999 }
1000 }
1001 EXPORT_SYMBOL(drm_pagemap_put);
1002
1003 /**
1004 * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
1005 * @devmem_allocation: Pointer to the device memory allocation
1006 *
1007 * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap lock;
1008 * migration is done via the migrate_device_* functions.
1009 *
1010 * Return: 0 on success, negative error code on failure.
1011 */
1012 int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
1013 {
1014 const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
1015 struct drm_pagemap_migrate_details mdetails = {};
1016 unsigned long npages, mpages = 0;
1017 struct page **pages;
1018 unsigned long *src, *dst;
1019 struct drm_pagemap_addr *pagemap_addr;
1020 void *buf;
1021 int i, err = 0;
1022 unsigned int retry_count = 2;
1023
1024 npages = devmem_allocation->size >> PAGE_SHIFT;
1025
1026 retry:
1027 if (!mmget_not_zero(devmem_allocation->mm))
1028 return -EFAULT;
1029
1030 buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*pagemap_addr) +
1031 sizeof(*pages), GFP_KERNEL);
1032 if (!buf) {
1033 err = -ENOMEM;
1034 goto err_out;
1035 }
1036 src = buf;
1037 dst = buf + (sizeof(*src) * npages);
1038 pagemap_addr = buf + (2 * sizeof(*src) * npages);
1039 pages = buf + (2 * sizeof(*src) + sizeof(*pagemap_addr)) * npages;
1040
1041 err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
1042 if (err)
1043 goto err_free;
1044
1045 err = migrate_device_pfns(src, npages);
1046 if (err)
1047 goto err_free;
1048
1049 err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
1050 src, dst, 0);
1051 if (err || !mpages)
1052 goto err_finalize;
1053
1054 err = drm_pagemap_migrate_map_pages(devmem_allocation->dev,
1055 devmem_allocation->dpagemap, pagemap_addr,
1056 dst, npages, DMA_FROM_DEVICE,
1057 &mdetails);
1058 if (err)
1059 goto err_finalize;
1060
1061 for (i = 0; i < npages;) {
1062 unsigned int order = 0;
1063
1064 pages[i] = migrate_pfn_to_page(src[i]);
1065 if (pages[i])
1066 order = folio_order(page_folio(pages[i]));
1067
1068 i += NR_PAGES(order);
1069 }
1070
1071 err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
1072 if (err)
1073 goto err_finalize;
1074
1075 err_finalize:
1076 if (err)
1077 drm_pagemap_migration_unlock_put_pages(npages, dst);
1078 migrate_device_pages(src, dst, npages);
1079 migrate_device_finalize(src, dst, npages);
1080 drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, pagemap_addr, dst, npages,
1081 DMA_FROM_DEVICE);
1082
1083 err_free:
1084 kvfree(buf);
1085 err_out:
1086 mmput_async(devmem_allocation->mm);
1087
1088 if (completion_done(&devmem_allocation->detached))
1089 return 0;
1090
1091 if (retry_count--) {
1092 cond_resched();
1093 goto retry;
1094 }
1095
1096 return err ?: -EBUSY;
1097 }
1098 EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
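
/*
 * Example (illustrative only): evicting a device memory allocation from a
 * driver eviction or shrinker path, without holding the mmap lock:
 *
 *	err = drm_pagemap_evict_to_ram(devmem);
 *	if (err)
 *		drm_warn(dpagemap->drm, "Eviction to RAM failed: %d\n", err);
 */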
1099
1100 /**
1101 * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
1102 * @vas: Pointer to the VM area structure
1103 * @page: Pointer to the page for fault handling.
1104 * @fault_addr: Fault address
1105 * @size: Size of migration
1106 *
1107 * This internal function performs the migration of the specified GPU SVM range
1108 * to RAM. It sets up the migration, populates and DMA-maps RAM PFNs, and
1109 * invokes the driver-specific operations for migration to RAM.
1110 *
1111 * Return: 0 on success, negative error code on failure.
1112 */
1113 static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
1114 struct page *page,
1115 unsigned long fault_addr,
1116 unsigned long size)
1117 {
1118 struct migrate_vma migrate = {
1119 .vma = vas,
1120 .pgmap_owner = page_pgmap(page)->owner,
1121 .flags = MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
1122 MIGRATE_VMA_SELECT_DEVICE_COHERENT |
1123 MIGRATE_VMA_SELECT_COMPOUND,
1124 .fault_page = page,
1125 };
1126 struct drm_pagemap_migrate_details mdetails = {};
1127 struct drm_pagemap_zdd *zdd;
1128 const struct drm_pagemap_devmem_ops *ops;
1129 struct device *dev = NULL;
1130 unsigned long npages, mpages = 0;
1131 struct page **pages;
1132 struct drm_pagemap_addr *pagemap_addr;
1133 unsigned long start, end;
1134 void *buf;
1135 int i, err = 0;
1136
1137 zdd = drm_pagemap_page_zone_device_data(page);
1138 if (time_before64(get_jiffies_64(), zdd->devmem_allocation->timeslice_expiration))
1139 return 0;
1140
1141 start = ALIGN_DOWN(fault_addr, size);
1142 end = ALIGN(fault_addr + 1, size);
1143
1144 /* Corner case where the VMA has been partially unmapped */
1145 if (start < vas->vm_start)
1146 start = vas->vm_start;
1147 if (end > vas->vm_end)
1148 end = vas->vm_end;
1149
1150 migrate.start = start;
1151 migrate.end = end;
1152 npages = npages_in_range(start, end);
1153
1154 buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*pagemap_addr) +
1155 sizeof(*pages), GFP_KERNEL);
1156 if (!buf) {
1157 err = -ENOMEM;
1158 goto err_out;
1159 }
1160 pagemap_addr = buf + (2 * sizeof(*migrate.src) * npages);
1161 pages = buf + (2 * sizeof(*migrate.src) + sizeof(*pagemap_addr)) * npages;
1162
1163 migrate.vma = vas;
1164 migrate.src = buf;
1165 migrate.dst = migrate.src + npages;
1166
1167 err = migrate_vma_setup(&migrate);
1168 if (err)
1169 goto err_free;
1170
1171 /* Raced with another CPU fault, nothing to do */
1172 if (!migrate.cpages)
1173 goto err_free;
1174
1175 ops = zdd->devmem_allocation->ops;
1176 dev = zdd->devmem_allocation->dev;
1177
1178 err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
1179 migrate.src, migrate.dst,
1180 start);
1181 if (err)
1182 goto err_finalize;
1183
1184 err = drm_pagemap_migrate_map_pages(dev, zdd->dpagemap, pagemap_addr, migrate.dst, npages,
1185 DMA_FROM_DEVICE, &mdetails);
1186 if (err)
1187 goto err_finalize;
1188
1189 for (i = 0; i < npages;) {
1190 unsigned int order = 0;
1191
1192 pages[i] = migrate_pfn_to_page(migrate.src[i]);
1193 if (pages[i])
1194 order = folio_order(page_folio(pages[i]));
1195
1196 i += NR_PAGES(order);
1197 }
1198
1199 err = ops->copy_to_ram(pages, pagemap_addr, npages, NULL);
1200 if (err)
1201 goto err_finalize;
1202
1203 err_finalize:
1204 if (err)
1205 drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
1206 migrate_vma_pages(&migrate);
1207 migrate_vma_finalize(&migrate);
1208 if (dev)
1209 drm_pagemap_migrate_unmap_pages(dev, pagemap_addr, migrate.dst,
1210 npages, DMA_FROM_DEVICE);
1211 err_free:
1212 kvfree(buf);
1213 err_out:
1214
1215 return err;
1216 }
1217
1218 /**
1219 * drm_pagemap_folio_free() - Put GPU SVM zone device data associated with a folio
1220 * @folio: Pointer to the folio
1221 *
1222 * This function is a callback used to put the GPU SVM zone device data
1223 * associated with a folio when it is being released.
1224 */
1225 static void drm_pagemap_folio_free(struct folio *folio)
1226 {
1227 struct page *page = folio_page(folio, 0);
1228
1229 drm_pagemap_zdd_put(drm_pagemap_page_zone_device_data(page));
1230 }
1231
1232 /**
1233 * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
1234 * @vmf: Pointer to the fault information structure
1235 *
1236 * This function is a page fault handler used to migrate a virtual range
1237 * to ram. The device memory allocation in which the device page is found is
1238 * migrated in its entirety.
1239 *
1240 * Returns:
1241 * VM_FAULT_SIGBUS on failure, 0 on success.
1242 */
1243 static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
1244 {
1245 struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(vmf->page);
1246 int err;
1247
1248 err = __drm_pagemap_migrate_to_ram(vmf->vma,
1249 vmf->page, vmf->address,
1250 zdd->devmem_allocation->size);
1251
1252 return err ? VM_FAULT_SIGBUS : 0;
1253 }
1254
1255 static void drm_pagemap_folio_split(struct folio *orig_folio, struct folio *new_folio)
1256 {
1257 struct drm_pagemap_zdd *zdd;
1258
1259 if (!new_folio)
1260 return;
1261
1262 new_folio->pgmap = orig_folio->pgmap;
1263 zdd = folio_zone_device_data(orig_folio);
1264 folio_set_zone_device_data(new_folio, drm_pagemap_zdd_get(zdd));
1265 }
1266
1267 static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
1268 .folio_free = drm_pagemap_folio_free,
1269 .migrate_to_ram = drm_pagemap_migrate_to_ram,
1270 .folio_split = drm_pagemap_folio_split,
1271 };
1272
1273 /**
1274 * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
1275 *
1276 * Returns:
1277 * Pointer to the GPU SVM device page map operations structure.
1278 */
1279 const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
1280 {
1281 return &drm_pagemap_pagemap_ops;
1282 }
1283 EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
1284
1285 /**
1286 * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
1287 *
1288 * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
1289 * @dev: Pointer to the device structure to which the device memory allocation belongs
1290 * @mm: Pointer to the mm_struct for the address space
1291 * @ops: Pointer to the operations structure for GPU SVM device memory
1292 * @dpagemap: The struct drm_pagemap we're allocating from.
1293 * @size: Size of device memory allocation
1294 * @pre_migrate_fence: Fence to wait for or pipeline behind before migration starts.
1295 * (May be NULL).
1296 */
1297 void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
1298 struct device *dev, struct mm_struct *mm,
1299 const struct drm_pagemap_devmem_ops *ops,
1300 struct drm_pagemap *dpagemap, size_t size,
1301 struct dma_fence *pre_migrate_fence)
1302 {
1303 init_completion(&devmem_allocation->detached);
1304 devmem_allocation->dev = dev;
1305 devmem_allocation->mm = mm;
1306 devmem_allocation->ops = ops;
1307 devmem_allocation->dpagemap = dpagemap;
1308 devmem_allocation->size = size;
1309 devmem_allocation->pre_migrate_fence = pre_migrate_fence;
1310 }
1311 EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);
1312
1313 /**
1314 * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page
1315 * @page: The struct page.
1316 *
1317 * Return: A pointer to the struct drm_pagemap of a device private page that
1318 * was populated from the struct drm_pagemap. If the page was *not* populated
1319 * from a struct drm_pagemap, the result is undefined and the function call
1320 * may result in dereferencing an invalid address.
1321 */
1322 struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
1323 {
1324 struct drm_pagemap_zdd *zdd = drm_pagemap_page_zone_device_data(page);
1325
1326 return zdd->devmem_allocation->dpagemap;
1327 }
1328 EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);
1329
1330 /**
1331 * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
1332 * @dpagemap: Pointer to the drm_pagemap managing the device memory
1333 * @start: Start of the virtual range to populate.
1334 * @end: End of the virtual range to populate.
1335 * @mm: Pointer to the virtual address space.
1336 * @timeslice_ms: The time requested for the migrated pagemap pages to
1337 * be present in @mm before being allowed to be migrated back.
1338 *
1339 * Attempt to populate a virtual range with device memory pages,
1340 * clearing them or migrating data from the existing pages if necessary.
1341 * The function is best effort only, and implementations may vary
1342 * in how hard they try to satisfy the request.
1343 *
1344 * Return: %0 on success, negative error code on error. If the hardware
1345 * device was removed / unbound the function will return %-ENODEV.
1346 */
1347 int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
1348 unsigned long start, unsigned long end,
1349 struct mm_struct *mm,
1350 unsigned long timeslice_ms)
1351 {
1352 int err;
1353
1354 if (!mmget_not_zero(mm))
1355 return -EFAULT;
1356 mmap_read_lock(mm);
1357 err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
1358 timeslice_ms);
1359 mmap_read_unlock(mm);
1360 mmput(mm);
1361
1362 return err;
1363 }
1364 EXPORT_SYMBOL(drm_pagemap_populate_mm);
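
/*
 * Example (illustrative only): a GPU SVM implementation requesting, best
 * effort, that a faulting range be backed by device memory before
 * revalidating its GPU mappings:
 *
 *	err = drm_pagemap_populate_mm(dpagemap, start, end, mm, timeslice_ms);
 *	if (err)
 *		drm_dbg(dpagemap->drm,
 *			"Migration to devmem failed (%d), using system RAM.\n",
 *			err);
 */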
1365
1366 void drm_pagemap_destroy(struct drm_pagemap *dpagemap, bool is_atomic_or_reclaim)
1367 {
1368 if (dpagemap->ops->destroy)
1369 dpagemap->ops->destroy(dpagemap, is_atomic_or_reclaim);
1370 else
1371 kfree(dpagemap);
1372 }
1373
1374 static void drm_pagemap_exit(void)
1375 {
1376 flush_work(&drm_pagemap_work);
1377 if (WARN_ON(!llist_empty(&drm_pagemap_unhold_list)))
1378 disable_work_sync(&drm_pagemap_work);
1379 }
1380 module_exit(drm_pagemap_exit);
1381