// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/ttm/ttm_backup.h>
#include <linux/page-flags.h>
#include <linux/swap.h>

/*
 * Need to map shmem indices to handle since a handle value
 * of 0 means error, following the swp_entry_t convention.
 */
static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
{
	return (unsigned long)idx + 1;
}

/* Inverse of ttm_backup_shmem_idx_to_handle(). */
static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
{
	return handle - 1;
}

/**
 * ttm_backup_drop() - release memory associated with a handle
 * @backup: The struct backup pointer used to obtain the handle
 * @handle: The handle obtained from the @backup_page function.
 *
 * Punches out exactly the one PAGE_SIZE range of the shmem file
 * that backs @handle, freeing the backing store (or swap slot)
 * without touching neighbouring pages.
 */
void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
	loff_t start = ttm_backup_handle_to_shmem_idx(handle);

	/* Convert the page index to a byte offset into the shmem file. */
	start <<= PAGE_SHIFT;
	shmem_truncate_range(file_inode(backup), start,
			     start + PAGE_SIZE - 1);
}

/**
 * ttm_backup_copy_page() - Copy the contents of a previously backed
 * up page
 * @backup: The struct backup pointer used to back up the page.
 * @dst: The struct page to copy into.
 * @handle: The handle returned when the page was backed up.
 * @intr: Try to perform waits interruptible or at least killable.
 *
 * Return: 0 on success, Negative error code on failure, notably
 * -EINTR if @intr was set to true and a signal is pending.
 */
int ttm_backup_copy_page(struct file *backup, struct page *dst,
			 pgoff_t handle, bool intr)
{
	struct address_space *mapping = backup->f_mapping;
	struct folio *from_folio;
	pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);

	/*
	 * Read the backed-up folio back in (possibly from swap);
	 * shmem_read_folio() returns an ERR_PTR on failure.
	 */
	from_folio = shmem_read_folio(mapping, idx);
	if (IS_ERR(from_folio))
		return PTR_ERR(from_folio);

	/* Pick the single page at @idx within the (possibly large) folio. */
	copy_highpage(dst, folio_file_page(from_folio, idx));
	folio_put(from_folio);

	return 0;
}

/**
 * ttm_backup_backup_page() - Backup a page
 * @backup: The struct backup pointer to use.
 * @page: The page to back up.
 * @writeback: Whether to perform immediate writeback of the page.
 * This may have performance implications.
 * @idx: A unique integer for each page and each struct backup.
 * This allows the backup implementation to avoid managing
 * its address space separately.
 * @page_gfp: The gfp value used when the page was allocated.
 * This is used for accounting purposes.
 * @alloc_gfp: The gfp to be used when allocating memory.
 *
 * Context: If called from reclaim context, the caller needs to
 * assert that the shrinker gfp has __GFP_FS set, to avoid
 * deadlocking on lock_page(). If @writeback is set to true and
 * called from reclaim context, the caller also needs to assert
 * that the shrinker gfp has __GFP_IO set, since without it,
 * we're not allowed to start backup IO.
 *
 * Return: A handle on success. Negative error code on failure.
 *
 * Note: This function could be extended to back up a folio and
 * implementations would then split the folio internally if needed.
 * Drawback is that the caller would then have to keep track of
 * the folio size- and usage.
 */
s64
ttm_backup_backup_page(struct file *backup, struct page *page,
		       bool writeback, pgoff_t idx, gfp_t page_gfp,
		       gfp_t alloc_gfp)
{
	/*
	 * NOTE(review): @page_gfp is documented as used for accounting,
	 * but it is not referenced in this implementation — confirm
	 * whether it is reserved for future use or consumed elsewhere.
	 */
	struct address_space *mapping = backup->f_mapping;
	unsigned long handle = 0;
	struct folio *to_folio;
	int ret;

	/* Obtain (allocating if necessary) the shmem folio at @idx. */
	to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
	if (IS_ERR(to_folio))
		return PTR_ERR(to_folio);

	folio_mark_accessed(to_folio);
	/* Copy under the folio lock; mark dirty so the data isn't dropped. */
	folio_lock(to_folio);
	folio_mark_dirty(to_folio);
	copy_highpage(folio_file_page(to_folio, idx), page);
	handle = ttm_backup_shmem_idx_to_handle(idx);

	if (writeback && !folio_mapped(to_folio) &&
	    folio_clear_dirty_for_io(to_folio)) {
		folio_set_reclaim(to_folio);
		ret = shmem_writeout(to_folio, NULL, NULL);
		if (!folio_test_writeback(to_folio))
			folio_clear_reclaim(to_folio);
		/*
		 * If writeout succeeds, it unlocks the folio. errors
		 * are otherwise dropped, since writeout is only best
		 * effort here.
		 */
		if (ret)
			folio_unlock(to_folio);
	} else {
		folio_unlock(to_folio);
	}

	folio_put(to_folio);

	return handle;
}

/**
 * ttm_backup_fini() - Free the struct backup resources after last use.
 * @backup: Pointer to the struct backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct file *backup)
{
	/* Drops the final file reference; the shmem file is torn down. */
	fput(backup);
}

/**
 * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
 * left for backup.
 *
 * This function is intended also for driver use to indicate whether a
 * backup attempt is meaningful.
 *
 * Return: An approximate size of backup space available.
 */
u64 ttm_backup_bytes_avail(void)
{
	/*
	 * The idea behind backing up to shmem is that shmem objects may
	 * eventually be swapped out. So no point swapping out if there
	 * is no or low swap-space available. But the accuracy of this
	 * number also depends on shmem actually swapping out backed-up
	 * shmem objects without too much buffering.
	 */
	return (u64)get_nr_swap_pages() << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);

/**
 * ttm_backup_shmem_create() - Create a shmem-based struct backup.
 * @size: The maximum size (in bytes) to back up.
 *
 * Create a backup utilizing shmem objects.
 *
 * Return: A pointer to a struct file on success,
 * an error pointer on error.
 */
struct file *ttm_backup_shmem_create(loff_t size)
{
	/* Unlinked shmem file; flags 0 — default VM accounting applies. */
	return shmem_file_setup("ttm shmem backup", size, 0);
}