xref: /linux/drivers/gpu/drm/ttm/ttm_backup.c (revision 547c5775a742d9c83891b629b75d1d4c8e88d8c0)
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/ttm/ttm_backup.h>
#include <linux/page-flags.h>
#include <linux/swap.h>

/*
 * Need to map shmem indices to handle since a handle value
 * of 0 means error, following the swp_entry_t convention.
 */
static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
{
	return (unsigned long)idx + 1;
}

static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
{
	return handle - 1;
}
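
/*
 * Usage sketch (hypothetical, illustration only): the round trip below
 * shows the convention that a handle value of 0 is reserved for errors,
 * so shmem index 0 maps to handle 1 and back again.
 */
static void __maybe_unused ttm_backup_handle_example(void)
{
	pgoff_t idx = 0;
	unsigned long handle = ttm_backup_shmem_idx_to_handle(idx);

	WARN_ON(handle == 0);	/* 0 would mean error */
	WARN_ON(ttm_backup_handle_to_shmem_idx(handle) != idx);
}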

/**
 * ttm_backup_drop() - release memory associated with a handle
 * @backup: The struct backup pointer used to obtain the handle.
 * @handle: The handle obtained from ttm_backup_backup_page().
 */
void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
	loff_t start = ttm_backup_handle_to_shmem_idx(handle);

	start <<= PAGE_SHIFT;
	shmem_truncate_range(file_inode(backup), start,
			     start + PAGE_SIZE - 1);
}

/**
 * ttm_backup_copy_page() - Copy the contents of a previously backed
 * up page
 * @backup: The struct backup pointer used to back up the page.
 * @dst: The struct page to copy into.
 * @handle: The handle returned when the page was backed up.
 * @intr: Whether to perform waits interruptibly, or at least killably.
 *
 * Return: 0 on success, negative error code on failure, notably
 * -EINTR if @intr was set to true and a signal is pending.
 */
int ttm_backup_copy_page(struct file *backup, struct page *dst,
			 pgoff_t handle, bool intr)
{
	struct address_space *mapping = backup->f_mapping;
	struct folio *from_folio;
	pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);

	from_folio = shmem_read_folio(mapping, idx);
	if (IS_ERR(from_folio))
		return PTR_ERR(from_folio);

	copy_highpage(dst, folio_file_page(from_folio, idx));
	folio_put(from_folio);

	return 0;
}
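
/*
 * Usage sketch (hypothetical, illustration only): restore a page from its
 * backup handle and then release the backup copy. The function below is
 * not part of the TTM API and its error handling is an example only.
 */
static int __maybe_unused ttm_backup_restore_example(struct file *backup,
						     struct page *dst,
						     pgoff_t handle)
{
	int ret;

	/* Copy the backed-up contents into @dst, waiting interruptibly. */
	ret = ttm_backup_copy_page(backup, dst, handle, true);
	if (ret)
		return ret;

	/* The backup copy is no longer needed; free its shmem page. */
	ttm_backup_drop(backup, handle);

	return 0;
}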

/**
 * ttm_backup_backup_page() - Back up a page
 * @backup: The struct backup pointer to use.
 * @page: The page to back up.
 * @writeback: Whether to perform immediate writeback of the page.
 * This may have performance implications.
 * @idx: A unique integer for each page and each struct backup.
 * This allows the backup implementation to avoid managing
 * its address space separately.
 * @page_gfp: The gfp value used when the page was allocated.
 * This is used for accounting purposes.
 * @alloc_gfp: The gfp to be used when allocating memory.
 *
 * Context: If called from reclaim context, the caller needs to
 * assert that the shrinker gfp has __GFP_FS set, to avoid
 * deadlocking on lock_page(). If @writeback is set to true and
 * called from reclaim context, the caller also needs to assert
 * that the shrinker gfp has __GFP_IO set, since without it,
 * we're not allowed to start backup IO.
 *
 * Return: A handle on success. Negative error code on failure.
 *
 * Note: This function could be extended to back up a folio and
 * implementations would then split the folio internally if needed.
 * A drawback is that the caller would then have to keep track of
 * the folio size and usage.
 */
s64
ttm_backup_backup_page(struct file *backup, struct page *page,
		       bool writeback, pgoff_t idx, gfp_t page_gfp,
		       gfp_t alloc_gfp)
{
	struct address_space *mapping = backup->f_mapping;
	unsigned long handle = 0;
	struct folio *to_folio;
	int ret;

	to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
	if (IS_ERR(to_folio))
		return PTR_ERR(to_folio);

	folio_mark_accessed(to_folio);
	folio_lock(to_folio);
	folio_mark_dirty(to_folio);
	copy_highpage(folio_file_page(to_folio, idx), page);
	handle = ttm_backup_shmem_idx_to_handle(idx);

	if (writeback && !folio_mapped(to_folio) &&
	    folio_clear_dirty_for_io(to_folio)) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};
		folio_set_reclaim(to_folio);
		ret = shmem_writeout(to_folio, &wbc);
		if (!folio_test_writeback(to_folio))
			folio_clear_reclaim(to_folio);
		/*
		 * If writeout succeeds, it unlocks the folio. Errors
		 * are otherwise dropped, since writeout is only best
		 * effort here.
		 */
		if (ret)
			folio_unlock(to_folio);
	} else {
		folio_unlock(to_folio);
	}

	folio_put(to_folio);

	return handle;
}
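
/*
 * Usage sketch (hypothetical, illustration only): back up one page at
 * shmem index @idx and return the handle to store for a later
 * ttm_backup_copy_page() or ttm_backup_drop(). The gfp values and the
 * immediate-writeback choice are examples; real callers pass the gfp
 * the page was allocated with and a suitable allocation gfp.
 */
static s64 __maybe_unused ttm_backup_backup_example(struct file *backup,
						    struct page *page,
						    pgoff_t idx)
{
	s64 handle;

	handle = ttm_backup_backup_page(backup, page, true, idx,
					GFP_KERNEL, GFP_KERNEL);
	if (handle < 0)
		return handle;	/* Negative error code, e.g. -ENOMEM. */

	/* @page may now be freed; @handle identifies the backup copy. */
	return handle;
}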

/**
 * ttm_backup_fini() - Free the struct backup resources after last use.
 * @backup: Pointer to the struct backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct file *backup)
{
	fput(backup);
}

/**
 * ttm_backup_bytes_avail() - Report the approximate number of bytes of
 * backup space left.
 *
 * This function is also intended for driver use, to indicate whether a
 * backup attempt is meaningful.
 *
 * Return: An approximate size of backup space available.
 */
u64 ttm_backup_bytes_avail(void)
{
	/*
	 * The idea behind backing up to shmem is that shmem objects may
	 * eventually be swapped out. So there is no point in swapping out
	 * if there is little or no swap space available. But the accuracy
	 * of this number also depends on shmem actually swapping out
	 * backed-up shmem objects without too much buffering.
	 */
	return (u64)get_nr_swap_pages() << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
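
/*
 * Usage sketch (hypothetical, illustration only): a driver-side check of
 * whether attempting backup is worthwhile. The one-page threshold is an
 * arbitrary example value.
 */
static bool __maybe_unused ttm_backup_worth_trying_example(void)
{
	return ttm_backup_bytes_avail() >= PAGE_SIZE;
}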

/**
 * ttm_backup_shmem_create() - Create a shmem-based struct backup.
 * @size: The maximum size (in bytes) to back up.
 *
 * Create a backup utilizing shmem objects.
 *
 * Return: A pointer to a struct file on success,
 * an error pointer on error.
 */
struct file *ttm_backup_shmem_create(loff_t size)
{
	return shmem_file_setup("ttm shmem backup", size, 0);
}
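
/*
 * Usage sketch (hypothetical, illustration only): lifetime of a backup
 * file. The SZ_2M size is an arbitrary example; drivers would size it
 * after the object to back up.
 */
static int __maybe_unused ttm_backup_lifetime_example(void)
{
	struct file *backup;

	backup = ttm_backup_shmem_create(SZ_2M);
	if (IS_ERR(backup))
		return PTR_ERR(backup);

	/* ... back up and restore pages using the helpers above ... */

	ttm_backup_fini(backup);	/* @backup must not be used after this. */

	return 0;
}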