// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

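/*
 * Overview: when a read completes and the data is also to be written to the
 * local cache, each folio is flagged with PG_private_2 and with the third
 * folio-queue mark by netfs_pgpriv2_mark_copy_to_cache().
 * netfs_pgpriv2_write_to_the_cache() then builds a write request that copies
 * the marked folios to the cache, netfs_pgpriv2_unlock_copied_folios() clears
 * PG_private_2 as those copies complete, and netfs_pgpriv2_cancel() clears
 * everything in the event of an unrecoverable error.
 */
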
/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2.  The
 * third mark in the folio queue is used to indicate that this folio needs
 * writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}
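/*
 * Sketch of the expected caller side (an illustration, not code from this
 * file): the read collector, with NETFS_RREQ_USE_PGPRIV2 set and valid cache
 * resources, would mark each freshly-read folio before unlocking it:
 *
 *	netfs_pgpriv2_mark_copy_to_cache(subreq, rreq, folioq, slot);
 *	folio_unlock(folio);
 *
 * The surrounding checks are assumptions; see the callers in read_collect.c
 * for the authoritative sequence.
 */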

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

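	/* Walk every segment of the queue; __ffs() finds the lowest marked
	 * slot each time round, and clearing that mark advances the scan, so
	 * each marked folio is visited exactly once.
	 */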
	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
	struct netfs_io_stream *cache  = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

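	/* If the folio extends past what the write request believes the file
	 * size to be, refresh the request's notion of i_size from the inode
	 * (the value sampled above is known to cover at least fpos).
	 */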
	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

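	/* Trim the amount we copy to the part of the folio below i_size and
	 * note whether this write runs up to the file's end, so the cache
	 * write can be flagged as reaching EOF.
	 */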
	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

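	/* The folio is now entirely queued: rewind the subrequest offset,
	 * step the main iterator past the whole folio and record that
	 * everything up to the end of the folio has been issued.
	 */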
	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);

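	/* A short flen means the folio straddles EOF, so no further data can
	 * be appended to this write; flush what has been batched so far.
	 */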
	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked with
 * the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);

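	/* Copy the marked folios to the cache in queue order, clearing each
	 * mark as the folio is handed over; on error, break out and leave the
	 * remaining marks to be cancelled below.
	 */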
	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		if (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				break;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

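	/* Flush anything still batched in the cache stream and tell the
	 * collector that no more subrequests will be queued.
	 */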
	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
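	/* Fall through: netfs_pgpriv2_cancel() drops PG_private_2 from any
	 * folios that never made it into the write request (a no-op when the
	 * loop above consumed every mark).
	 */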
couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

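	/* If the head slot index has run off the end of its segment, that
	 * segment has been fully consumed and can be released before we start.
	 */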
	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

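		/* Stop once we reach the collection point; folios beyond it
		 * have not yet been fully written to the cache.
		 */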
		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}
265