// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/ttm/ttm_backup.h>
#include <linux/page-flags.h>
#include <linux/swap.h>

10e7b5d23eSThomas Hellström /*
11e7b5d23eSThomas Hellström * Need to map shmem indices to handle since a handle value
12e7b5d23eSThomas Hellström * of 0 means error, following the swp_entry_t convention.
13e7b5d23eSThomas Hellström */
ttm_backup_shmem_idx_to_handle(pgoff_t idx)14e7b5d23eSThomas Hellström static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
15e7b5d23eSThomas Hellström {
16e7b5d23eSThomas Hellström return (unsigned long)idx + 1;
17e7b5d23eSThomas Hellström }
18e7b5d23eSThomas Hellström
/* Inverse of ttm_backup_shmem_idx_to_handle(): recover the shmem index. */
static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
{
	return (pgoff_t)(handle - 1);
}
23e7b5d23eSThomas Hellström
24e7b5d23eSThomas Hellström /**
25e7b5d23eSThomas Hellström * ttm_backup_drop() - release memory associated with a handle
26e7b5d23eSThomas Hellström * @backup: The struct backup pointer used to obtain the handle
27e7b5d23eSThomas Hellström * @handle: The handle obtained from the @backup_page function.
28e7b5d23eSThomas Hellström */
ttm_backup_drop(struct file * backup,pgoff_t handle)29*d4ad53adSThomas Hellström void ttm_backup_drop(struct file *backup, pgoff_t handle)
30e7b5d23eSThomas Hellström {
31e7b5d23eSThomas Hellström loff_t start = ttm_backup_handle_to_shmem_idx(handle);
32e7b5d23eSThomas Hellström
33e7b5d23eSThomas Hellström start <<= PAGE_SHIFT;
34*d4ad53adSThomas Hellström shmem_truncate_range(file_inode(backup), start,
35e7b5d23eSThomas Hellström start + PAGE_SIZE - 1);
36e7b5d23eSThomas Hellström }
37e7b5d23eSThomas Hellström
38e7b5d23eSThomas Hellström /**
39e7b5d23eSThomas Hellström * ttm_backup_copy_page() - Copy the contents of a previously backed
40e7b5d23eSThomas Hellström * up page
41e7b5d23eSThomas Hellström * @backup: The struct backup pointer used to back up the page.
42e7b5d23eSThomas Hellström * @dst: The struct page to copy into.
43e7b5d23eSThomas Hellström * @handle: The handle returned when the page was backed up.
442bb04ea9SThomas Hellström * @intr: Try to perform waits interruptible or at least killable.
45e7b5d23eSThomas Hellström *
46e7b5d23eSThomas Hellström * Return: 0 on success, Negative error code on failure, notably
47e7b5d23eSThomas Hellström * -EINTR if @intr was set to true and a signal is pending.
48e7b5d23eSThomas Hellström */
ttm_backup_copy_page(struct file * backup,struct page * dst,pgoff_t handle,bool intr)49*d4ad53adSThomas Hellström int ttm_backup_copy_page(struct file *backup, struct page *dst,
50e7b5d23eSThomas Hellström pgoff_t handle, bool intr)
51e7b5d23eSThomas Hellström {
52*d4ad53adSThomas Hellström struct address_space *mapping = backup->f_mapping;
53e7b5d23eSThomas Hellström struct folio *from_folio;
54e7b5d23eSThomas Hellström pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);
55e7b5d23eSThomas Hellström
56e7b5d23eSThomas Hellström from_folio = shmem_read_folio(mapping, idx);
57e7b5d23eSThomas Hellström if (IS_ERR(from_folio))
58e7b5d23eSThomas Hellström return PTR_ERR(from_folio);
59e7b5d23eSThomas Hellström
60e7b5d23eSThomas Hellström copy_highpage(dst, folio_file_page(from_folio, idx));
61e7b5d23eSThomas Hellström folio_put(from_folio);
62e7b5d23eSThomas Hellström
63e7b5d23eSThomas Hellström return 0;
64e7b5d23eSThomas Hellström }
65e7b5d23eSThomas Hellström
/**
 * ttm_backup_backup_page() - Backup a page
 * @backup: The struct backup pointer to use.
 * @page: The page to back up.
 * @writeback: Whether to perform immediate writeback of the page.
 * This may have performance implications.
 * @idx: A unique integer for each page and each struct backup.
 * This allows the backup implementation to avoid managing
 * its address space separately.
 * @page_gfp: The gfp value used when the page was allocated.
 * This is used for accounting purposes.
 * @alloc_gfp: The gfp to be used when allocating memory.
 *
 * Context: If called from reclaim context, the caller needs to
 * assert that the shrinker gfp has __GFP_FS set, to avoid
 * deadlocking on lock_page(). If @writeback is set to true and
 * called from reclaim context, the caller also needs to assert
 * that the shrinker gfp has __GFP_IO set, since without it,
 * we're not allowed to start backup IO.
 *
 * Return: A handle on success. Negative error code on failure.
 *
 * Note: This function could be extended to back up a folio and
 * implementations would then split the folio internally if needed.
 * Drawback is that the caller would then have to keep track of
 * the folio size- and usage.
 */
s64
ttm_backup_backup_page(struct file *backup, struct page *page,
		       bool writeback, pgoff_t idx, gfp_t page_gfp,
		       gfp_t alloc_gfp)
{
	struct address_space *mapping = backup->f_mapping;
	unsigned long handle = 0;
	struct folio *to_folio;
	int ret;

	/*
	 * NOTE(review): @page_gfp is not referenced in this body; presumably
	 * reserved for accounting as per the kernel-doc — confirm with callers.
	 */

	/* Find or allocate the shmem folio that will hold the copy. */
	to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
	if (IS_ERR(to_folio))
		return PTR_ERR(to_folio);

	folio_mark_accessed(to_folio);
	/* Lock before dirtying and copying so writeback can't race the copy. */
	folio_lock(to_folio);
	folio_mark_dirty(to_folio);
	copy_highpage(folio_file_page(to_folio, idx), page);
	handle = ttm_backup_shmem_idx_to_handle(idx);

	/*
	 * Optionally push the copy out to swap right away. Skipped when the
	 * folio is mapped elsewhere or was not dirty (nothing to write).
	 */
	if (writeback && !folio_mapped(to_folio) &&
	    folio_clear_dirty_for_io(to_folio)) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};
		folio_set_reclaim(to_folio);
		ret = shmem_writeout(to_folio, &wbc);
		/* Writeout never started (or already done): undo reclaim hint. */
		if (!folio_test_writeback(to_folio))
			folio_clear_reclaim(to_folio);
		/*
		 * If writeout succeeds, it unlocks the folio. errors
		 * are otherwise dropped, since writeout is only best
		 * effort here.
		 */
		if (ret)
			folio_unlock(to_folio);
	} else {
		folio_unlock(to_folio);
	}

	folio_put(to_folio);

	return handle;
}
141e7b5d23eSThomas Hellström
/**
 * ttm_backup_fini() - Free the struct backup resources after last use.
 * @backup: Pointer to the struct backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct file *backup)
{
	/* Drop the file reference taken at creation time. */
	fput(backup);
}
152e7b5d23eSThomas Hellström
153e7b5d23eSThomas Hellström /**
154e7b5d23eSThomas Hellström * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
155e7b5d23eSThomas Hellström * left for backup.
156e7b5d23eSThomas Hellström *
157e7b5d23eSThomas Hellström * This function is intended also for driver use to indicate whether a
158e7b5d23eSThomas Hellström * backup attempt is meaningful.
159e7b5d23eSThomas Hellström *
160e7b5d23eSThomas Hellström * Return: An approximate size of backup space available.
161e7b5d23eSThomas Hellström */
ttm_backup_bytes_avail(void)162e7b5d23eSThomas Hellström u64 ttm_backup_bytes_avail(void)
163e7b5d23eSThomas Hellström {
164e7b5d23eSThomas Hellström /*
165e7b5d23eSThomas Hellström * The idea behind backing up to shmem is that shmem objects may
166e7b5d23eSThomas Hellström * eventually be swapped out. So no point swapping out if there
167e7b5d23eSThomas Hellström * is no or low swap-space available. But the accuracy of this
168e7b5d23eSThomas Hellström * number also depends on shmem actually swapping out backed-up
169e7b5d23eSThomas Hellström * shmem objects without too much buffering.
170e7b5d23eSThomas Hellström */
171e7b5d23eSThomas Hellström return (u64)get_nr_swap_pages() << PAGE_SHIFT;
172e7b5d23eSThomas Hellström }
173e7b5d23eSThomas Hellström EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);
174e7b5d23eSThomas Hellström
/**
 * ttm_backup_shmem_create() - Create a shmem-based struct backup.
 * @size: The maximum size (in bytes) to back up.
 *
 * Create a backup utilizing shmem objects.
 *
 * Return: A pointer to a struct file on success,
 * an error pointer on error.
 */
struct file *ttm_backup_shmem_create(loff_t size)
{
	/* flags == 0: a plain, unaccounted tmpfs-backed file. */
	return shmem_file_setup("ttm shmem backup", size, 0);
}
188