// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Mark a folio as requiring copy-to-cache using PG_private_2.
 * The third mark in the folio queue is used to indicate that this folio
 * needs writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

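	/* PG_private_2 holds the folio while the copy to the cache is in
	 * flight; the third folio_queue mark records which slots still need
	 * writing.
	 */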
	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}

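		/* marks3 is a bitmap of marked slots; clear the lowest-set
		 * bit on each pass until this segment is empty.
		 */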
		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
	struct netfs_io_stream *cache  = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}
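	/* flen is now clamped to EOF; to_eof tells the write code that this
	 * folio reaches the end of the data.
	 */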

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

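	/* Prime the cache stream: submit_off/submit_len track how much of the
	 * folio still needs issuing; submit_extendable_to is reset each pass
	 * to the space remaining in the folio, presumably so that the write
	 * can be rounded out to the folio boundary.
	 */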
	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

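	/* Whatever was submitted, the iterator must step over the whole
	 * folio, including any tail beyond flen that wasn't issued.
	 */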
	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);

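	/* If the folio crosses EOF (flen < fsize), flush the write now as no
	 * further data can follow it in this stream.
	 */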
	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked
 * with the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

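	/* Without valid cache resources there's nowhere to copy to; just
	 * clear the PG_private_2 marks and bail.
	 */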
	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);

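	/* Walk the marked folios, copying each to the cache and clearing its
	 * mark as we go; stop on error or when no marks remain.
	 */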
	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		if (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				break;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
	return;

couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

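	/* If the head slot points past the end of the current segment, that
	 * segment has been fully consumed and can be freed.
	 */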
	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq,
		 * then we can get rid of it provided it's not also the tail
		 * folioq being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

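		/* Stop once we reach the collection point; folios beyond it
		 * haven't been fully written back yet.
		 */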
		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}