// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2. The
 * third mark in the folio queue is used to indicate that this folio needs
 * writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
	struct netfs_io_stream *cache = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests. For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
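	 *
	 * Each pass lets netfs_advance_write() take as much of the remaining
	 * span as it will accept, then bumps submit_off and trims submit_len
	 * by the amount consumed, looping until the whole length has been
	 * attached.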
	 */
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);

	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked with
 * the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);
	if (!wreq->io_streams[1].avail) {
		netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
		goto couldnt_start;
	}

	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		if (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				break;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
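 *
 * PG_private_2 is dropped on each folio whose end lies at or below
 * collected_to (ie. it has been fully written to the cache); the folio is
 * then removed from the head of the buffer, and the function reports whether
 * any progress was made.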
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq. If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}