// SPDX-License-Identifier: MIT
/*
 * Copyright © 2024 Intel Corporation
 */

#include <drm/ttm/ttm_backup.h>

#include <linux/export.h>
#include <linux/page-flags.h>
#include <linux/swap.h>

/*
 * Need to map shmem indices to handle since a handle value
 * of 0 means error, following the swp_entry_t convention.
 */
static unsigned long ttm_backup_shmem_idx_to_handle(pgoff_t idx)
{
	return (unsigned long)idx + 1;
}

/* Inverse of ttm_backup_shmem_idx_to_handle(). */
static pgoff_t ttm_backup_handle_to_shmem_idx(pgoff_t handle)
{
	return handle - 1;
}

/**
 * ttm_backup_drop() - release memory associated with a handle
 * @backup: The struct backup pointer used to obtain the handle
 * @handle: The handle obtained from the @backup_page function.
 */
void ttm_backup_drop(struct file *backup, pgoff_t handle)
{
	loff_t start = ttm_backup_handle_to_shmem_idx(handle);

	/* Convert the page index to a byte offset and punch out that one page. */
	start <<= PAGE_SHIFT;
	shmem_truncate_range(file_inode(backup), start,
			     start + PAGE_SIZE - 1);
}

/**
 * ttm_backup_copy_page() - Copy the contents of a previously backed
 * up page
 * @backup: The struct backup pointer used to back up the page.
 * @dst: The struct page to copy into.
 * @handle: The handle returned when the page was backed up.
 * @intr: Try to perform waits interruptible or at least killable.
 * @additional_gfp: GFP mask to add to the default GFP mask if any.
 *
 * Return: 0 on success, Negative error code on failure, notably
 * -EINTR if @intr was set to true and a signal is pending.
 */
int ttm_backup_copy_page(struct file *backup, struct page *dst,
			 pgoff_t handle, bool intr, gfp_t additional_gfp)
{
	struct address_space *mapping = backup->f_mapping;
	struct folio *from_folio;
	pgoff_t idx = ttm_backup_handle_to_shmem_idx(handle);

	/*
	 * Look up the backed-up folio in the shmem mapping, combining the
	 * mapping's default gfp mask with any caller-supplied extra flags.
	 */
	from_folio = shmem_read_folio_gfp(mapping, idx, mapping_gfp_mask(mapping)
					  | additional_gfp);
	if (IS_ERR(from_folio))
		return PTR_ERR(from_folio);

	copy_highpage(dst, folio_file_page(from_folio, idx));
	folio_put(from_folio);

	return 0;
}

/**
 * ttm_backup_backup_page() - Backup a page
 * @backup: The struct backup pointer to use.
 * @page: The page to back up.
 * @writeback: Whether to perform immediate writeback of the page.
 * This may have performance implications.
 * @idx: A unique integer for each page and each struct backup.
 * This allows the backup implementation to avoid managing
 * its address space separately.
 * @page_gfp: The gfp value used when the page was allocated.
 * This is used for accounting purposes.
 * @alloc_gfp: The gfp to be used when allocating memory.
 *
 * Context: If called from reclaim context, the caller needs to
 * assert that the shrinker gfp has __GFP_FS set, to avoid
 * deadlocking on lock_page(). If @writeback is set to true and
 * called from reclaim context, the caller also needs to assert
 * that the shrinker gfp has __GFP_IO set, since without it,
 * we're not allowed to start backup IO.
 *
 * Return: A handle on success. Negative error code on failure.
 *
 * Note: This function could be extended to back up a folio and
 * implementations would then split the folio internally if needed.
 * Drawback is that the caller would then have to keep track of
 * the folio size- and usage.
 */
s64
ttm_backup_backup_page(struct file *backup, struct page *page,
		       bool writeback, pgoff_t idx, gfp_t page_gfp,
		       gfp_t alloc_gfp)
{
	/* NOTE(review): @page_gfp is not referenced in this body — confirm
	 * whether it is still needed for accounting as the kerneldoc says. */
	struct address_space *mapping = backup->f_mapping;
	unsigned long handle = 0;
	struct folio *to_folio;
	int ret;

	to_folio = shmem_read_folio_gfp(mapping, idx, alloc_gfp);
	if (IS_ERR(to_folio))
		return PTR_ERR(to_folio);

	folio_mark_accessed(to_folio);
	folio_lock(to_folio);
	/* Mark dirty before copying so the data is not lost if reclaimed. */
	folio_mark_dirty(to_folio);
	copy_highpage(folio_file_page(to_folio, idx), page);
	handle = ttm_backup_shmem_idx_to_handle(idx);

	/* Optionally kick off writeback immediately, but only if nobody
	 * else has the folio mapped and we win the race to clear the
	 * dirty bit for IO. */
	if (writeback && !folio_mapped(to_folio) &&
	    folio_clear_dirty_for_io(to_folio)) {
		folio_set_reclaim(to_folio);
		ret = shmem_writeout(to_folio, NULL, NULL);
		if (!folio_test_writeback(to_folio))
			folio_clear_reclaim(to_folio);
		/*
		 * If writeout succeeds, it unlocks the folio. Errors
		 * are otherwise dropped, since writeout is only best
		 * effort here.
		 */
		if (ret)
			folio_unlock(to_folio);
	} else {
		folio_unlock(to_folio);
	}

	folio_put(to_folio);

	return handle;
}

/**
 * ttm_backup_fini() - Free the struct backup resources after last use.
 * @backup: Pointer to the struct backup whose resources to free.
 *
 * After a call to this function, it's illegal to use the @backup pointer.
 */
void ttm_backup_fini(struct file *backup)
{
	fput(backup);
}

/**
 * ttm_backup_bytes_avail() - Report the approximate number of bytes of backup space
 * left for backup.
 *
 * This function is intended also for driver use to indicate whether a
 * backup attempt is meaningful.
 *
 * Return: An approximate size of backup space available.
 */
u64 ttm_backup_bytes_avail(void)
{
	/*
	 * The idea behind backing up to shmem is that shmem objects may
	 * eventually be swapped out. So no point swapping out if there
	 * is no or low swap-space available. But the accuracy of this
	 * number also depends on shmem actually swapping out backed-up
	 * shmem objects without too much buffering.
	 */
	return (u64)get_nr_swap_pages() << PAGE_SHIFT;
}
EXPORT_SYMBOL_GPL(ttm_backup_bytes_avail);

/**
 * ttm_backup_shmem_create() - Create a shmem-based struct backup.
 * @size: The maximum size (in bytes) to back up.
 *
 * Create a backup utilizing shmem objects.
 *
 * Return: A pointer to a struct file on success,
 * an error pointer on error.
 */
struct file *ttm_backup_shmem_create(loff_t size)
{
	return shmem_file_setup("ttm shmem backup",  size,
				EMPTY_VMA_FLAGS);
}