// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/ttm/ttm_backup.h>
#include <linux/page-flags.h>
#include <linux/swap.h>

/*
 * Casting from a randomized struct file * to a struct ttm_backup * is fine
 * since struct ttm_backup is neither defined nor dereferenced.
 */
static struct file *ttm_backup_to_file(struct ttm_backup *backup)
{
	return (void *)backup;
}

static struct ttm_backup *ttm_file_to_backup(struct file *file)
{
	return (void *)file;
}

/*
 * Shmem indices need to be mapped to handles since a handle value
 * of 0 means error, following the swp_entry_t convention.
 */
static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
{
	return (unsigned long)idx + 1;
}

static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
{
	return handle - 1;
}

/**
 * ttm_backup_drop() - Release memory associated with a handle
 * @backup: The struct ttm_backup pointer used to obtain the handle.
 * @handle: The handle obtained from the ttm_backup_backup_page() function.
 */
void ttm_backup_drop(struct ttm_backup *backup, pgoff_t handle)
{
	loff_t start = ttm_backup_handle_to_shmem_idx(handle);

	start <<= PAGE_SHIFT;
	shmem_truncate_range(file_inode(ttm_backup_to_file(backup)), start,
			     start + PAGE_SIZE - 1);
}

/**
 * ttm_backup_copy_page() - Copy the contents of a previously backed
 * up page
 * @backup: The struct ttm_backup pointer used to back up the page.
 * @dst: The struct page to copy into.
 * @handle: The handle returned when the page was backed up.
 * @intr: Whether to perform waits interruptibly, or at least killably.
 *
 * Return: 0 on success, negative error code on failure, notably
 * -EINTR if @intr was set to true and a signal is pending.
 */
int ttm_backup_copy_page(struct ttm_backup *backup, struct page *dst,
			 pgoff_t handle, bool intr)
{
	struct file *filp = ttm_backup_to_file(backup);
	struct address_space *mapping = filp->f_mapping;
	struct folio *from_folio;
	pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);

	from_folio = shmem_read_folio(mapping, idx);
	if (IS_ERR(from_folio))
		return PTR_ERR(from_folio);

	copy_highpage(dst, folio_file_page(from_folio, idx));
	folio_put(from_folio);

	return 0;
}

/**
 * ttm_backup_backup_page() - Back up a page
 * @backup: The struct ttm_backup pointer to use.
 * @page: The page to back up.
 * @writeback: Whether to perform immediate writeback of the page.
 * This may have performance implications.
 * @idx: A unique integer for each page and each struct ttm_backup.
 * This allows the backup implementation to avoid managing
 * its address space separately.
 * @page_gfp: The gfp value used when the page was allocated.
 * This is used for accounting purposes.
 * @alloc_gfp: The gfp to be used when allocating memory.
 *
 * Context: If called from reclaim context, the caller needs to
 * assert that the shrinker gfp has __GFP_FS set, to avoid
 * deadlocking on lock_page(). If @writeback is set to true and
 * called from reclaim context, the caller also needs to assert
 * that the shrinker gfp has __GFP_IO set, since without it,
 * we're not allowed to start backup IO.
 *
 * Return: A handle on success. Negative error code on failure.
 *
 * Note: This function could be extended to back up a folio, and
 * implementations would then split the folio internally if needed.
 * The drawback is that the caller would then have to keep track of
 * the folio size and usage.
 */
s64
ttm_backup_backup_page(struct ttm_backup *backup, struct page *page,
		       bool writeback, pgoff_t idx, gfp_t page_gfp,
		       gfp_t alloc_gfp)
{
	struct file *filp = ttm_backup_to_file(backup);
	struct address_space *mapping = filp->f_mapping;
	unsigned long handle = 0;
	struct folio *to_folio;
	int ret;

	to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
	if (IS_ERR(to_folio))
		return PTR_ERR(to_folio);

	folio_mark_accessed(to_folio);
	folio_lock(to_folio);
	folio_mark_dirty(to_folio);
	copy_highpage(folio_file_page(to_folio, idx), page);
	handle = ttm_backup_shmem_idx_to_handle(idx);

	if (writeback && !folio_mapped(to_folio) &&
	    folio_clear_dirty_for_io(to_folio)) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};
		folio_set_reclaim(to_folio);
		ret = mapping->a_ops->writepage(folio_file_page(to_folio, idx), &wbc);
		if (!folio_test_writeback(to_folio))
			folio_clear_reclaim(to_folio);
		/*
		 * If writepage succeeds, it unlocks the folio.
		 * writepage() errors are otherwise dropped, since writepage()
		 * is only best effort here.
		 */
		if (ret)
			folio_unlock(to_folio);
	} else {
		folio_unlock(to_folio);
	}

	folio_put(to_folio);

	return handle;
}
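
/*
 * A minimal caller-side sketch (hypothetical, not part of this file),
 * illustrating the Context requirements above for a shrinker-based
 * caller, where @sc is the shrinker's struct shrink_control:
 *
 *	if (!(sc->gfp_mask & __GFP_FS))
 *		return SHRINK_STOP;
 *	if (writeback && !(sc->gfp_mask & __GFP_IO))
 *		writeback = false;
 *
 *	handle = ttm_backup_backup_page(backup, page, writeback, idx,
 *					GFP_KERNEL, GFP_KERNEL);
 */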

/**
 * ttm_backup_fini() - Free the struct ttm_backup resources after last use.
 * @backup: Pointer to the struct ttm_backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct ttm_backup *backup)
{
	fput(ttm_backup_to_file(backup));
}

/**
 * ttm_backup_bytes_avail() - Report the approximate number of bytes of
 * backup space left.
 *
 * This function is also intended for driver use, to indicate whether a
 * backup attempt is meaningful.
 *
 * Return: An approximate size of backup space available.
 */
u64 ttm_backup_bytes_avail(void)
{
	/*
	 * The idea behind backing up to shmem is that shmem objects may
	 * eventually be swapped out. So there is no point in backing up
	 * if no or only little swap space is available. But the accuracy
	 * of this number also depends on shmem actually swapping out
	 * backed-up shmem objects without too much buffering.
	 */
	return (u64)get_nr_swap_pages() << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
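
/*
 * Sketch of a driver-side check (hypothetical), skipping a backup
 * attempt when swap space looks insufficient for @size bytes:
 *
 *	if (ttm_backup_bytes_avail() < size)
 *		return -ENOSPC;
 */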

/**
 * ttm_backup_shmem_create() - Create a shmem-based struct ttm_backup.
 * @size: The maximum size (in bytes) to back up.
 *
 * Create a backup utilizing shmem objects.
 *
 * Return: A pointer to a struct ttm_backup on success,
 * an error pointer on error.
 */
struct ttm_backup *ttm_backup_shmem_create(loff_t size)
{
	struct file *filp;

	filp = shmem_file_setup("ttm shmem backup", size, 0);

	return ttm_file_to_backup(filp);
}
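
/*
 * Hypothetical lifecycle sketch tying the API together (error handling
 * abbreviated; SZ_2M and the index 0 are illustrative):
 *
 *	struct ttm_backup *backup = ttm_backup_shmem_create(SZ_2M);
 *	s64 handle;
 *
 *	if (IS_ERR(backup))
 *		return PTR_ERR(backup);
 *
 *	handle = ttm_backup_backup_page(backup, page, false, 0,
 *					GFP_KERNEL, GFP_KERNEL);
 *	if (handle >= 0) {
 *		ttm_backup_copy_page(backup, page, handle, true);
 *		ttm_backup_drop(backup, handle);
 *	}
 *	ttm_backup_fini(backup);
 */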