// SPDX-License-Identifier: GPL-2.0-only OR MIT
/*
 * Copyright © 2024-2025 Intel Corporation
 */

#include <linux/dma-mapping.h>
#include <linux/migrate.h>
#include <linux/pagemap.h>
#include <drm/drm_drv.h>
#include <drm/drm_pagemap.h>

/**
 * DOC: Overview
 *
 * The DRM pagemap layer is intended to augment the dev_pagemap functionality by
 * providing a way to populate a struct mm_struct virtual range with device
 * private pages and to provide helpers to abstract device memory allocations,
 * to migrate memory back and forth between device memory and system RAM and
 * to handle access (and in the future migration) between devices implementing
 * a fast interconnect that is not necessarily visible to the rest of the
 * system.
 *
 * Typically the DRM pagemap receives requests from one or more DRM GPU SVM
 * instances to populate struct mm_struct virtual ranges with memory. The
 * migration is best effort only and may thus fail. The implementation should
 * also handle device unbinding by blocking (returning an -ENODEV error) new
 * population requests and, after that, migrating all device pages to system
 * RAM. An example of such unbind handling is sketched below.
 */
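
/*
 * Example: a minimal sketch, under stated assumptions, of how a driver's
 * populate_mm() callback could block new population requests after device
 * unbind. The struct my_pagemap wrapper, its unbound flag and
 * my_migrate_to_devmem() are hypothetical driver-side names, not part of
 * this file:
 *
 *	static int my_populate_mm(struct drm_pagemap *dpagemap,
 *				  unsigned long start, unsigned long end,
 *				  struct mm_struct *mm,
 *				  unsigned long timeslice_ms)
 *	{
 *		struct my_pagemap *mypm =
 *			container_of(dpagemap, struct my_pagemap, dpagemap);
 *
 *		if (READ_ONCE(mypm->unbound))
 *			return -ENODEV;
 *
 *		return my_migrate_to_devmem(mypm, mm, start, end,
 *					    timeslice_ms);
 *	}
 */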

/**
 * DOC: Migration
 *
 * Migration granularity typically follows the GPU SVM range requests, but
 * there can be clashes, due to races or due to multiple GPU SVM instances
 * having different views of the ranges used, with the result that parts of a
 * requested range are already present in the requested device memory. In that
 * case the implementation has a variety of options: It can fail, it can
 * populate only the part of the range that isn't already in device memory,
 * or it can evict the range to system memory before trying to migrate.
 * Ideally an implementation would just try to migrate the missing part of
 * the range and allocate just enough memory to do so.
 *
 * When migrating to system memory as a response to a CPU fault or a device
 * memory eviction request, currently a full device memory allocation is
 * migrated back to system memory. Moving forward this might need improvement
 * for situations where a single page needs bouncing between system memory and
 * device memory due to, for example, atomic operations.
 *
 * Key DRM pagemap components:
 *
 * - Device Memory Allocations:
 *      Embedded structure containing enough information for the drm_pagemap to
 *      migrate to / from device memory.
 *
 * - Device Memory Operations:
 *      Define the interface for driver-specific device memory operations to
 *      release memory, populate PFNs, and copy to / from device memory. A
 *      sketch of the ops wiring follows this comment.
 */
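
/*
 * A minimal sketch of the driver-side ops wiring (the my_* callbacks are
 * hypothetical; the authoritative signatures live in
 * include/drm/drm_pagemap.h):
 *
 *	static const struct drm_pagemap_devmem_ops my_devmem_ops = {
 *		.devmem_release = my_devmem_release,
 *		.populate_devmem_pfn = my_populate_devmem_pfn,
 *		.copy_to_devmem = my_copy_to_devmem,
 *		.copy_to_ram = my_copy_to_ram,
 *	};
 */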

/**
 * struct drm_pagemap_zdd - GPU SVM zone device data
 *
 * @refcount: Reference count for the zdd
 * @devmem_allocation: device memory allocation
 * @device_private_page_owner: Device private pages owner
 *
 * This structure serves as a generic wrapper installed in
 * page->zone_device_data. It provides infrastructure for looking up a device
 * memory allocation upon CPU page fault and asynchronously releasing device
 * memory once the CPU has no page references. Asynchronous release is useful
 * because CPU page references can be dropped in IRQ contexts, while releasing
 * device memory likely requires sleeping locks.
 */
struct drm_pagemap_zdd {
	struct kref refcount;
	struct drm_pagemap_devmem *devmem_allocation;
	void *device_private_page_owner;
};

/**
 * drm_pagemap_zdd_alloc() - Allocate a zdd structure.
 * @device_private_page_owner: Device private pages owner
 *
 * This function allocates and initializes a new zdd structure. It sets up the
 * reference count and the device private page owner.
 *
 * Return: Pointer to the allocated zdd on success, NULL on failure.
 */
static struct drm_pagemap_zdd *
drm_pagemap_zdd_alloc(void *device_private_page_owner)
{
	struct drm_pagemap_zdd *zdd;

	zdd = kmalloc(sizeof(*zdd), GFP_KERNEL);
	if (!zdd)
		return NULL;

	kref_init(&zdd->refcount);
	zdd->devmem_allocation = NULL;
	zdd->device_private_page_owner = device_private_page_owner;

	return zdd;
}

/**
 * drm_pagemap_zdd_get() - Get a reference to a zdd structure.
 * @zdd: Pointer to the zdd structure.
 *
 * This function increments the reference count of the provided zdd structure.
 *
 * Return: Pointer to the zdd structure.
 */
static struct drm_pagemap_zdd *drm_pagemap_zdd_get(struct drm_pagemap_zdd *zdd)
{
	kref_get(&zdd->refcount);
	return zdd;
}

/**
 * drm_pagemap_zdd_destroy() - Destroy a zdd structure.
 * @ref: Pointer to the reference count structure.
 *
 * This function signals that the device memory allocation, if any, is
 * detached, invokes the driver's devmem_release() callback and frees the zdd.
 */
static void drm_pagemap_zdd_destroy(struct kref *ref)
{
	struct drm_pagemap_zdd *zdd =
		container_of(ref, struct drm_pagemap_zdd, refcount);
	struct drm_pagemap_devmem *devmem = zdd->devmem_allocation;

	if (devmem) {
		complete_all(&devmem->detached);
		if (devmem->ops->devmem_release)
			devmem->ops->devmem_release(devmem);
	}
	kfree(zdd);
}

/**
 * drm_pagemap_zdd_put() - Put a zdd reference.
 * @zdd: Pointer to the zdd structure.
 *
 * This function decrements the reference count of the provided zdd structure
 * and destroys it if the count drops to zero.
 */
static void drm_pagemap_zdd_put(struct drm_pagemap_zdd *zdd)
{
	kref_put(&zdd->refcount, drm_pagemap_zdd_destroy);
}

/**
 * drm_pagemap_migration_unlock_put_page() - Put a migration page
 * @page: Pointer to the page to put
 *
 * This function unlocks and puts a page.
 */
static void drm_pagemap_migration_unlock_put_page(struct page *page)
{
	unlock_page(page);
	put_page(page);
}

/**
 * drm_pagemap_migration_unlock_put_pages() - Put migration pages
 * @npages: Number of pages
 * @migrate_pfn: Array of migrate page frame numbers
 *
 * This function unlocks and puts an array of pages.
 */
static void drm_pagemap_migration_unlock_put_pages(unsigned long npages,
						   unsigned long *migrate_pfn)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		struct page *page;

		if (!migrate_pfn[i])
			continue;

		page = migrate_pfn_to_page(migrate_pfn[i]);
		drm_pagemap_migration_unlock_put_page(page);
		migrate_pfn[i] = 0;
	}
}

/**
 * drm_pagemap_get_devmem_page() - Get a reference to a device memory page
 * @page: Pointer to the page
 * @zdd: Pointer to the GPU SVM zone device data
 *
 * This function associates the given page with the specified GPU SVM zone
 * device data and initializes it for zone device usage.
 */
static void drm_pagemap_get_devmem_page(struct page *page,
					struct drm_pagemap_zdd *zdd)
{
	page->zone_device_data = drm_pagemap_zdd_get(zdd);
	zone_device_page_init(page);
}

/**
 * drm_pagemap_migrate_map_pages() - Map migration pages for GPU SVM migration
 * @dev: The device for which the pages are being mapped
 * @dma_addr: Array to store DMA addresses corresponding to mapped pages
 * @migrate_pfn: Array of migrate page frame numbers to map
 * @npages: Number of pages to map
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function maps pages of memory for migration usage in GPU SVM. It
 * iterates over each page frame number provided in @migrate_pfn, maps the
 * corresponding page, and stores the DMA address in the provided @dma_addr
 * array.
 *
 * Returns: 0 on success, -EFAULT if an error occurs during mapping.
 */
static int drm_pagemap_migrate_map_pages(struct device *dev,
					 dma_addr_t *dma_addr,
					 unsigned long *migrate_pfn,
					 unsigned long npages,
					 enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		struct page *page = migrate_pfn_to_page(migrate_pfn[i]);

		if (!page)
			continue;

		if (WARN_ON_ONCE(is_zone_device_page(page)))
			return -EFAULT;

		dma_addr[i] = dma_map_page(dev, page, 0, PAGE_SIZE, dir);
		if (dma_mapping_error(dev, dma_addr[i]))
			return -EFAULT;
	}

	return 0;
}

/**
 * drm_pagemap_migrate_unmap_pages() - Unmap pages previously mapped for GPU SVM migration
 * @dev: The device for which the pages were mapped
 * @dma_addr: Array of DMA addresses corresponding to mapped pages
 * @npages: Number of pages to unmap
 * @dir: Direction of data transfer (e.g., DMA_BIDIRECTIONAL)
 *
 * This function unmaps previously mapped pages of memory for GPU Shared Virtual
 * Memory (SVM). It iterates over each DMA address provided in @dma_addr, checks
 * if it's valid and not already unmapped, and unmaps the corresponding page.
 */
static void drm_pagemap_migrate_unmap_pages(struct device *dev,
					    dma_addr_t *dma_addr,
					    unsigned long npages,
					    enum dma_data_direction dir)
{
	unsigned long i;

	for (i = 0; i < npages; ++i) {
		if (!dma_addr[i] || dma_mapping_error(dev, dma_addr[i]))
			continue;

		dma_unmap_page(dev, dma_addr[i], PAGE_SIZE, dir);
	}
}

static unsigned long
npages_in_range(unsigned long start, unsigned long end)
{
	return (end - start) >> PAGE_SHIFT;
}

/**
 * drm_pagemap_migrate_to_devmem() - Migrate a struct mm_struct range to device memory
 * @devmem_allocation: The device memory allocation to migrate to.
 * The caller should hold a reference to the device memory allocation,
 * and the reference is consumed by this function unless it returns with
 * an error.
 * @mm: Pointer to the struct mm_struct.
 * @start: Start of the virtual address range to migrate.
 * @end: End of the virtual address range to migrate.
 * @timeslice_ms: The time requested for the migrated pagemap pages to
 * be present in @mm before being allowed to be migrated back.
 * @pgmap_owner: The device-private page owner installed for the migrated
 * pages. Not used for source page selection, since only system memory is
 * considered.
 *
 * This function migrates the specified virtual address range to device memory.
 * It performs the necessary setup and invokes the driver-specific operations for
 * migration to device memory. Expected to be called while holding the mmap lock in
 * at least read mode.
 *
 * Note: The @timeslice_ms parameter can typically be used to force data to
 * remain in pagemap pages long enough for a GPU to perform a task and to prevent
 * a migration livelock. One alternative would be for the GPU driver to block
 * in a mmu_notifier for the specified amount of time, but adding the
 * functionality to the pagemap is likely nicer to the system as a whole.
 *
 * Return: %0 on success, negative error code on failure.
 */
int drm_pagemap_migrate_to_devmem(struct drm_pagemap_devmem *devmem_allocation,
				  struct mm_struct *mm,
				  unsigned long start, unsigned long end,
				  unsigned long timeslice_ms,
				  void *pgmap_owner)
{
	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
	struct migrate_vma migrate = {
		.start		= start,
		.end		= end,
		.pgmap_owner	= pgmap_owner,
		.flags		= MIGRATE_VMA_SELECT_SYSTEM,
	};
	unsigned long i, npages = npages_in_range(start, end);
	struct vm_area_struct *vas;
	struct drm_pagemap_zdd *zdd = NULL;
	struct page **pages;
	dma_addr_t *dma_addr;
	void *buf;
	int err;

	mmap_assert_locked(mm);

	if (!ops->populate_devmem_pfn || !ops->copy_to_devmem ||
	    !ops->copy_to_ram)
		return -EOPNOTSUPP;

	vas = vma_lookup(mm, start);
	if (!vas) {
		err = -ENOENT;
		goto err_out;
	}

	if (end > vas->vm_end || start < vas->vm_start) {
		err = -EINVAL;
		goto err_out;
	}

	if (!vma_is_anonymous(vas)) {
		err = -EBUSY;
		goto err_out;
	}

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;

	zdd = drm_pagemap_zdd_alloc(pgmap_owner);
	if (!zdd) {
		err = -ENOMEM;
		goto err_free;
	}

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	if (!migrate.cpages) {
		err = -EFAULT;
		goto err_free;
	}

	if (migrate.cpages != npages) {
		err = -EBUSY;
		goto err_finalize;
	}

	err = ops->populate_devmem_pfn(devmem_allocation, npages, migrate.dst);
	if (err)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
					    migrate.src, npages, DMA_TO_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i) {
		struct page *page = pfn_to_page(migrate.dst[i]);

		pages[i] = page;
		migrate.dst[i] = migrate_pfn(migrate.dst[i]);
		drm_pagemap_get_devmem_page(page, zdd);
	}

	err = ops->copy_to_devmem(pages, dma_addr, npages);
	if (err)
		goto err_finalize;

	/* Upon success bind devmem allocation to range and zdd */
	devmem_allocation->timeslice_expiration = get_jiffies_64() +
		msecs_to_jiffies(timeslice_ms);
	zdd->devmem_allocation = devmem_allocation;	/* Owns ref */

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
	migrate_vma_pages(&migrate);
	migrate_vma_finalize(&migrate);
	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
					DMA_TO_DEVICE);
err_free:
	if (zdd)
		drm_pagemap_zdd_put(zdd);
	kvfree(buf);
err_out:
	return err;
}
EXPORT_SYMBOL_GPL(drm_pagemap_migrate_to_devmem);
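
/*
 * Usage sketch, under stated assumptions: a hypothetical driver embeds
 * struct drm_pagemap_devmem as the "base" member of its own my_devmem
 * allocation; my_devmem_alloc(), my_devmem_put(), my_devmem_ops and
 * my_owner are made-up names. The mmap lock is held in read mode, and the
 * allocation reference is consumed on success, so it is only released here
 * on error:
 *
 *	struct my_devmem *alloc = my_devmem_alloc(end - start);
 *
 *	drm_pagemap_devmem_init(&alloc->base, dev, mm, &my_devmem_ops,
 *				dpagemap, end - start);
 *	mmap_read_lock(mm);
 *	err = drm_pagemap_migrate_to_devmem(&alloc->base, mm, start, end,
 *					    timeslice_ms, my_owner);
 *	mmap_read_unlock(mm);
 *	if (err)
 *		my_devmem_put(alloc);
 */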

/**
 * drm_pagemap_migrate_populate_ram_pfn() - Populate RAM PFNs for a VM area
 * @vas: Pointer to the VM area structure, can be NULL
 * @fault_page: Fault page
 * @npages: Number of pages to populate
 * @mpages: Number of pages to migrate
 * @src_mpfn: Source array of migrate PFNs
 * @mpfn: Array of migrate PFNs to populate
 * @addr: Start address for PFN allocation
 *
 * This function populates the RAM migrate page frame numbers (PFNs) for the
 * specified VM area structure. It allocates and locks pages in the VM area for
 * RAM usage. If @vas is non-NULL, alloc_page_vma() is used for allocation;
 * otherwise alloc_page() is used.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int drm_pagemap_migrate_populate_ram_pfn(struct vm_area_struct *vas,
						struct page *fault_page,
						unsigned long npages,
						unsigned long *mpages,
						unsigned long *src_mpfn,
						unsigned long *mpfn,
						unsigned long addr)
{
	unsigned long i;

	for (i = 0; i < npages; ++i, addr += PAGE_SIZE) {
		struct page *page, *src_page;

		if (!(src_mpfn[i] & MIGRATE_PFN_MIGRATE))
			continue;

		src_page = migrate_pfn_to_page(src_mpfn[i]);
		if (!src_page)
			continue;

		if (fault_page) {
			if (src_page->zone_device_data !=
			    fault_page->zone_device_data)
				continue;
		}

		if (vas)
			page = alloc_page_vma(GFP_HIGHUSER, vas, addr);
		else
			page = alloc_page(GFP_HIGHUSER);

		if (!page)
			goto free_pages;

		mpfn[i] = migrate_pfn(page_to_pfn(page));
	}

	for (i = 0; i < npages; ++i) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);

		if (!page)
			continue;

		WARN_ON_ONCE(!trylock_page(page));
		++*mpages;
	}

	return 0;

free_pages:
	for (i = 0; i < npages; ++i) {
		struct page *page = migrate_pfn_to_page(mpfn[i]);

		if (!page)
			continue;

		put_page(page);
		mpfn[i] = 0;
	}
	return -ENOMEM;
}

/**
 * drm_pagemap_evict_to_ram() - Evict GPU SVM range to RAM
 * @devmem_allocation: Pointer to the device memory allocation
 *
 * Similar to __drm_pagemap_migrate_to_ram() but does not require the mmap
 * lock; migration is done via the migrate_device_* functions.
 *
 * Return: 0 on success, negative error code on failure.
 */
int drm_pagemap_evict_to_ram(struct drm_pagemap_devmem *devmem_allocation)
{
	const struct drm_pagemap_devmem_ops *ops = devmem_allocation->ops;
	unsigned long npages, mpages = 0;
	struct page **pages;
	unsigned long *src, *dst;
	dma_addr_t *dma_addr;
	void *buf;
	int i, err = 0;
	unsigned int retry_count = 2;

	npages = devmem_allocation->size >> PAGE_SHIFT;

retry:
	if (!mmget_not_zero(devmem_allocation->mm))
		return -EFAULT;

	buf = kvcalloc(npages, 2 * sizeof(*src) + sizeof(*dma_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	src = buf;
	dst = buf + (sizeof(*src) * npages);
	dma_addr = buf + (2 * sizeof(*src) * npages);
	pages = buf + (2 * sizeof(*src) + sizeof(*dma_addr)) * npages;

	err = ops->populate_devmem_pfn(devmem_allocation, npages, src);
	if (err)
		goto err_free;

	err = migrate_device_pfns(src, npages);
	if (err)
		goto err_free;

	err = drm_pagemap_migrate_populate_ram_pfn(NULL, NULL, npages, &mpages,
						   src, dst, 0);
	if (err || !mpages)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(devmem_allocation->dev, dma_addr,
					    dst, npages, DMA_FROM_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i)
		pages[i] = migrate_pfn_to_page(src[i]);

	err = ops->copy_to_ram(pages, dma_addr, npages);
	if (err)
		goto err_finalize;

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, dst);
	migrate_device_pages(src, dst, npages);
	migrate_device_finalize(src, dst, npages);
	drm_pagemap_migrate_unmap_pages(devmem_allocation->dev, dma_addr, npages,
					DMA_FROM_DEVICE);
err_free:
	kvfree(buf);
err_out:
	mmput_async(devmem_allocation->mm);

	if (completion_done(&devmem_allocation->detached))
		return 0;

	if (retry_count--) {
		cond_resched();
		goto retry;
	}

	return err ?: -EBUSY;
}
EXPORT_SYMBOL_GPL(drm_pagemap_evict_to_ram);
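
/*
 * Usage sketch: eviction is typically driven from a driver's shrinker or
 * device unbind path; the names below are hypothetical and the caller may
 * retry on -EBUSY:
 *
 *	err = drm_pagemap_evict_to_ram(&alloc->base);
 *	if (err)
 *		drm_warn(&my_drm_device, "devmem eviction failed: %pe\n",
 *			 ERR_PTR(err));
 */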

/**
 * __drm_pagemap_migrate_to_ram() - Migrate GPU SVM range to RAM (internal)
 * @vas: Pointer to the VM area structure
 * @device_private_page_owner: Device private pages owner
 * @page: Pointer to the page for fault handling (can be NULL)
 * @fault_addr: Fault address
 * @size: Size of migration
 *
 * This internal function performs the migration of the specified GPU SVM range
 * to RAM. It sets up the migration, populates and DMA-maps RAM PFNs, and
 * invokes the driver-specific operations for migration to RAM.
 *
 * Return: 0 on success, negative error code on failure.
 */
static int __drm_pagemap_migrate_to_ram(struct vm_area_struct *vas,
					void *device_private_page_owner,
					struct page *page,
					unsigned long fault_addr,
					unsigned long size)
{
	struct migrate_vma migrate = {
		.vma		= vas,
		.pgmap_owner	= device_private_page_owner,
		.flags		= MIGRATE_VMA_SELECT_DEVICE_PRIVATE |
		MIGRATE_VMA_SELECT_DEVICE_COHERENT,
		.fault_page	= page,
	};
	struct drm_pagemap_zdd *zdd;
	const struct drm_pagemap_devmem_ops *ops;
	struct device *dev = NULL;
	unsigned long npages, mpages = 0;
	struct page **pages;
	dma_addr_t *dma_addr;
	unsigned long start, end;
	void *buf;
	int i, err = 0;

	if (page) {
		zdd = page->zone_device_data;
		if (time_before64(get_jiffies_64(),
				  zdd->devmem_allocation->timeslice_expiration))
			return 0;
	}

	start = ALIGN_DOWN(fault_addr, size);
	end = ALIGN(fault_addr + 1, size);

	/* Corner case where the VMA has been partially unmapped */
	if (start < vas->vm_start)
		start = vas->vm_start;
	if (end > vas->vm_end)
		end = vas->vm_end;

	migrate.start = start;
	migrate.end = end;
	npages = npages_in_range(start, end);

	buf = kvcalloc(npages, 2 * sizeof(*migrate.src) + sizeof(*dma_addr) +
		       sizeof(*pages), GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto err_out;
	}
	dma_addr = buf + (2 * sizeof(*migrate.src) * npages);
	pages = buf + (2 * sizeof(*migrate.src) + sizeof(*dma_addr)) * npages;

	migrate.vma = vas;
	migrate.src = buf;
	migrate.dst = migrate.src + npages;

	err = migrate_vma_setup(&migrate);
	if (err)
		goto err_free;

	/* Raced with another CPU fault, nothing to do */
	if (!migrate.cpages)
		goto err_free;

	if (!page) {
		for (i = 0; i < npages; ++i) {
			if (!(migrate.src[i] & MIGRATE_PFN_MIGRATE))
				continue;

			page = migrate_pfn_to_page(migrate.src[i]);
			break;
		}

		if (!page)
			goto err_finalize;
	}
	zdd = page->zone_device_data;
	ops = zdd->devmem_allocation->ops;
	dev = zdd->devmem_allocation->dev;

	err = drm_pagemap_migrate_populate_ram_pfn(vas, page, npages, &mpages,
						   migrate.src, migrate.dst,
						   start);
	if (err)
		goto err_finalize;

	err = drm_pagemap_migrate_map_pages(dev, dma_addr, migrate.dst, npages,
					    DMA_FROM_DEVICE);
	if (err)
		goto err_finalize;

	for (i = 0; i < npages; ++i)
		pages[i] = migrate_pfn_to_page(migrate.src[i]);

	err = ops->copy_to_ram(pages, dma_addr, npages);
	if (err)
		goto err_finalize;

err_finalize:
	if (err)
		drm_pagemap_migration_unlock_put_pages(npages, migrate.dst);
	migrate_vma_pages(&migrate);
	migrate_vma_finalize(&migrate);
	if (dev)
		drm_pagemap_migrate_unmap_pages(dev, dma_addr, npages,
						DMA_FROM_DEVICE);
err_free:
	kvfree(buf);
err_out:

	return err;
}

/**
 * drm_pagemap_page_free() - Put GPU SVM zone device data associated with a page
 * @page: Pointer to the page
 *
 * This function is a callback used to put the GPU SVM zone device data
 * associated with a page when it is being released.
 */
static void drm_pagemap_page_free(struct page *page)
{
	drm_pagemap_zdd_put(page->zone_device_data);
}

/**
 * drm_pagemap_migrate_to_ram() - Migrate a virtual range to RAM (page fault handler)
 * @vmf: Pointer to the fault information structure
 *
 * This function is a page fault handler used to migrate a virtual range
 * to RAM. The device memory allocation in which the device page is found is
 * migrated in its entirety.
 *
 * Returns:
 * VM_FAULT_SIGBUS on failure, 0 on success.
 */
static vm_fault_t drm_pagemap_migrate_to_ram(struct vm_fault *vmf)
{
	struct drm_pagemap_zdd *zdd = vmf->page->zone_device_data;
	int err;

	err = __drm_pagemap_migrate_to_ram(vmf->vma,
					   zdd->device_private_page_owner,
					   vmf->page, vmf->address,
					   zdd->devmem_allocation->size);

	return err ? VM_FAULT_SIGBUS : 0;
}

static const struct dev_pagemap_ops drm_pagemap_pagemap_ops = {
	.page_free = drm_pagemap_page_free,
	.migrate_to_ram = drm_pagemap_migrate_to_ram,
};

/**
 * drm_pagemap_pagemap_ops_get() - Retrieve GPU SVM device page map operations
 *
 * Returns:
 * Pointer to the GPU SVM device page map operations structure.
 */
const struct dev_pagemap_ops *drm_pagemap_pagemap_ops_get(void)
{
	return &drm_pagemap_pagemap_ops;
}
EXPORT_SYMBOL_GPL(drm_pagemap_pagemap_ops_get);
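
/*
 * A sketch of how a driver might plug these ops into its dev_pagemap when
 * remapping device memory as device-private pages. The pgmap, res and
 * my_owner variables are hypothetical driver state, and error handling is
 * omitted:
 *
 *	pgmap->type = MEMORY_DEVICE_PRIVATE;
 *	pgmap->range.start = res->start;
 *	pgmap->range.end = res->end;
 *	pgmap->nr_range = 1;
 *	pgmap->ops = drm_pagemap_pagemap_ops_get();
 *	pgmap->owner = my_owner;
 *	addr = devm_memremap_pages(dev, pgmap);
 */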

/**
 * drm_pagemap_devmem_init() - Initialize a drm_pagemap device memory allocation
 *
 * @devmem_allocation: The struct drm_pagemap_devmem to initialize.
 * @dev: Pointer to the device structure to which the device memory allocation
 * belongs.
 * @mm: Pointer to the mm_struct for the address space
 * @ops: Pointer to the operations structure for GPU SVM device memory
 * @dpagemap: The struct drm_pagemap we're allocating from.
 * @size: Size of device memory allocation
 */
void drm_pagemap_devmem_init(struct drm_pagemap_devmem *devmem_allocation,
			     struct device *dev, struct mm_struct *mm,
			     const struct drm_pagemap_devmem_ops *ops,
			     struct drm_pagemap *dpagemap, size_t size)
{
	init_completion(&devmem_allocation->detached);
	devmem_allocation->dev = dev;
	devmem_allocation->mm = mm;
	devmem_allocation->ops = ops;
	devmem_allocation->dpagemap = dpagemap;
	devmem_allocation->size = size;
}
EXPORT_SYMBOL_GPL(drm_pagemap_devmem_init);

/**
 * drm_pagemap_page_to_dpagemap() - Return a pointer to the drm_pagemap of a page
 * @page: The struct page.
 *
 * Return: A pointer to the struct drm_pagemap of a device private page that
 * was populated from the struct drm_pagemap. If the page was *not* populated
 * from a struct drm_pagemap, the result is undefined and the function call
 * may result in dereferencing an invalid address.
 */
struct drm_pagemap *drm_pagemap_page_to_dpagemap(struct page *page)
{
	struct drm_pagemap_zdd *zdd = page->zone_device_data;

	return zdd->devmem_allocation->dpagemap;
}
EXPORT_SYMBOL_GPL(drm_pagemap_page_to_dpagemap);

/**
 * drm_pagemap_populate_mm() - Populate a virtual range with device memory pages
 * @dpagemap: Pointer to the drm_pagemap managing the device memory
 * @start: Start of the virtual range to populate.
 * @end: End of the virtual range to populate.
 * @mm: Pointer to the virtual address space.
 * @timeslice_ms: The time requested for the migrated pagemap pages to
 * be present in @mm before being allowed to be migrated back.
 *
 * Attempt to populate a virtual range with device memory pages,
 * clearing them or migrating data from the existing pages if necessary.
 * The function is best effort only, and implementations may vary
 * in how hard they try to satisfy the request.
 *
 * Return: %0 on success, negative error code on error. If the hardware
 * device was removed / unbound the function will return %-ENODEV.
 */
int drm_pagemap_populate_mm(struct drm_pagemap *dpagemap,
			    unsigned long start, unsigned long end,
			    struct mm_struct *mm,
			    unsigned long timeslice_ms)
{
	int err;

	if (!mmget_not_zero(mm))
		return -EFAULT;
	mmap_read_lock(mm);
	err = dpagemap->ops->populate_mm(dpagemap, start, end, mm,
					 timeslice_ms);
	mmap_read_unlock(mm);
	mmput(mm);

	return err;
}
EXPORT_SYMBOL(drm_pagemap_populate_mm);
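
/*
 * Usage sketch: a GPU SVM implementation would typically call this from its
 * GPU pagefault handling when it decides a range should live in device
 * memory, falling back to system memory on failure. The range_* variables
 * and my_map_range_in_system_memory() are hypothetical caller-side names:
 *
 *	err = drm_pagemap_populate_mm(dpagemap, range_start, range_end,
 *				      mm, timeslice_ms);
 *	if (err)
 *		err = my_map_range_in_system_memory(range);
 */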
839