1 // SPDX-License-Identifier: MIT 2 /* 3 * Copyright © 2024 Intel Corporation 4 */ 5 6 #include <drm/ttm/ttm_backup.h> 7 8 #include <linux/export.h> 9 #include <linux/page-flags.h> 10 #include <linux/swap.h> 11 12 /* 13 * Need to map shmem indices to handle since a handle value 14 * of 0 means error, following the swp_entry_t convention. 15 */ 16 static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx) 17 { 18 return (unsigned long)idx + 1; 19 } 20 21 static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle) 22 { 23 return handle - 1; 24 } 25 26 /** 27 * ttm_backup_drop() - release memory associated with a handle 28 * @backup: The struct backup pointer used to obtain the handle 29 * @handle: The handle obtained from the @backup_page function. 30 */ 31 void ttm_backup_drop(struct file *backup, pgoff_t handle) 32 { 33 loff_t start = ttm_backup_handle_to_shmem_idx(handle); 34 35 start <<= PAGE_SHIFT; 36 shmem_truncate_range(file_inode(backup), start, 37 start + PAGE_SIZE - 1); 38 } 39 40 /** 41 * ttm_backup_copy_page() - Copy the contents of a previously backed 42 * up page 43 * @backup: The struct backup pointer used to back up the page. 44 * @dst: The struct page to copy into. 45 * @handle: The handle returned when the page was backed up. 46 * @intr: Try to perform waits interruptible or at least killable. 47 * 48 * Return: 0 on success, Negative error code on failure, notably 49 * -EINTR if @intr was set to true and a signal is pending. 
50 */ 51 int ttm_backup_copy_page(struct file *backup, struct page *dst, 52 pgoff_t handle, bool intr) 53 { 54 struct address_space *mapping = backup->f_mapping; 55 struct folio *from_folio; 56 pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle); 57 58 from_folio = shmem_read_folio(mapping, idx); 59 if (IS_ERR(from_folio)) 60 return PTR_ERR(from_folio); 61 62 copy_highpage(dst, folio_file_page(from_folio, idx)); 63 folio_put(from_folio); 64 65 return 0; 66 } 67 68 /** 69 * ttm_backup_backup_page() - Backup a page 70 * @backup: The struct backup pointer to use. 71 * @page: The page to back up. 72 * @writeback: Whether to perform immediate writeback of the page. 73 * This may have performance implications. 74 * @idx: A unique integer for each page and each struct backup. 75 * This allows the backup implementation to avoid managing 76 * its address space separately. 77 * @page_gfp: The gfp value used when the page was allocated. 78 * This is used for accounting purposes. 79 * @alloc_gfp: The gfp to be used when allocating memory. 80 * 81 * Context: If called from reclaim context, the caller needs to 82 * assert that the shrinker gfp has __GFP_FS set, to avoid 83 * deadlocking on lock_page(). If @writeback is set to true and 84 * called from reclaim context, the caller also needs to assert 85 * that the shrinker gfp has __GFP_IO set, since without it, 86 * we're not allowed to start backup IO. 87 * 88 * Return: A handle on success. Negative error code on failure. 89 * 90 * Note: This function could be extended to back up a folio and 91 * implementations would then split the folio internally if needed. 92 * Drawback is that the caller would then have to keep track of 93 * the folio size- and usage. 
 */
s64
ttm_backup_backup_page(struct file *backup, struct page *page,
		       bool writeback, pgoff_t idx, gfp_t page_gfp,
		       gfp_t alloc_gfp)
{
	struct address_space *mapping = backup->f_mapping;
	unsigned long handle = 0;
	struct folio *to_folio;
	int ret;

	/* Look up or allocate the shmem folio that will hold the copy. */
	to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
	if (IS_ERR(to_folio))
		return PTR_ERR(to_folio);

	/* NOTE(review): @page_gfp is not used in this body — confirm intent. */
	folio_mark_accessed(to_folio);
	folio_lock(to_folio);
	folio_mark_dirty(to_folio);
	copy_highpage(folio_file_page(to_folio, idx), page);
	handle = ttm_backup_shmem_idx_to_handle(idx);

	/*
	 * Only attempt writeback if nobody has the folio mapped and we
	 * could transition it from dirty to writeback state.
	 */
	if (writeback && !folio_mapped(to_folio) &&
	    folio_clear_dirty_for_io(to_folio)) {
		struct writeback_control wbc = {
			.sync_mode = WB_SYNC_NONE,
			.nr_to_write = SWAP_CLUSTER_MAX,
			.range_start = 0,
			.range_end = LLONG_MAX,
			.for_reclaim = 1,
		};
		folio_set_reclaim(to_folio);
		ret = shmem_writeout(to_folio, &wbc);
		/* Writeback may complete (or fail) synchronously. */
		if (!folio_test_writeback(to_folio))
			folio_clear_reclaim(to_folio);
		/*
		 * If writeout succeeds, it unlocks the folio. errors
		 * are otherwise dropped, since writeout is only best
		 * effort here.
		 */
		if (ret)
			folio_unlock(to_folio);
	} else {
		folio_unlock(to_folio);
	}

	folio_put(to_folio);

	/* The handle remains valid; the data is now owned by shmem. */
	return handle;
}

/**
 * ttm_backup_fini() - Free the struct backup resources after last use.
 * @backup: Pointer to the struct backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct file *backup)
{
	fput(backup);
}

/**
 * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
 * left for backup.
 *
 * This function is intended also for driver use to indicate whether a
 * backup attempt is meaningful.
 *
 * Return: An approximate size of backup space available.
163 */ 164 u64 ttm_backup_bytes_avail(void) 165 { 166 /* 167 * The idea behind backing up to shmem is that shmem objects may 168 * eventually be swapped out. So no point swapping out if there 169 * is no or low swap-space available. But the accuracy of this 170 * number also depends on shmem actually swapping out backed-up 171 * shmem objects without too much buffering. 172 */ 173 return (u64)get_nr_swap_pages() << PAGE_SHIFT; 174 } 175 EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail); 176 177 /** 178 * ttm_backup_shmem_create() - Create a shmem-based struct backup. 179 * @size: The maximum size (in bytes) to back up. 180 * 181 * Create a backup utilizing shmem objects. 182 * 183 * Return: A pointer to a struct file on success, 184 * an error pointer on error. 185 */ 186 struct file *ttm_backup_shmem_create(loff_t size) 187 { 188 return shmem_file_setup("ttm shmem backup", size, 0); 189 } 190