xref: /linux/fs/netfs/read_pgpriv2.c (revision ee4cdf7ba857a894ad1650d6ab77669cbbfa329e)
// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Mark a folio as requiring copy-to-cache using PG_private_2.
 * The third mark in the folio queue is used to indicate that this folio
 * needs writing.
 */
void netfs_pgpriv2_mark_copy_to_cache(struct netfs_io_subrequest *subreq,
				      struct netfs_io_request *rreq,
				      struct folio_queue *folioq,
				      int slot)
{
	struct folio *folio = folioq_folio(folioq, slot);

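	/* Flag the folio as having data to be written to the cache
	 * (PG_private_2) and record the same fact in the folio queue's third
	 * mark so that the write-to-cache pass can find it later.
	 */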
	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	folioq_mark3(folioq, slot);
}

/*
 * [DEPRECATED] Cancel PG_private_2 on all marked folios in the event of an
 * unrecoverable error.
 */
static void netfs_pgpriv2_cancel(struct folio_queue *folioq)
{
	struct folio *folio;
	int slot;

	while (folioq) {
		if (!folioq->marks3) {
			folioq = folioq->next;
			continue;
		}

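		/* Clear the lowest-numbered marked slot; __ffs() is safe here
		 * because marks3 is known to be non-zero at this point.
		 */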
		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);

		trace_netfs_folio(folio, netfs_folio_trace_cancel_copy);
		folio_end_private_2(folio);
		folioq_unmark3(folioq, slot);
	}
}

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static int netfs_pgpriv2_copy_folio(struct netfs_io_request *wreq, struct folio *folio)
{
	struct netfs_io_stream *cache  = &wreq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(wreq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return 0;
	}

	if (fpos + fsize > wreq->i_size)
		wreq->i_size = i_size;

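	/* Trim the length to the part of the folio that lies below i_size and
	 * note whether this write runs up to the EOF (to_eof is passed on to
	 * netfs_advance_write() below).
	 */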
	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (netfs_buffer_append_folio(wreq, folio, false) < 0)
		return -ENOMEM;

	cache->submit_max_len = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		wreq->io_iter.iov_offset = cache->submit_off;

		atomic64_set(&wreq->issued_to, fpos + cache->submit_off);
		part = netfs_advance_write(wreq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		cache->submit_max_len -= part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

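	/* Reset the iterator offset and step the main iterator past the whole
	 * folio, including any tail beyond i_size that was not submitted.
	 */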
	wreq->io_iter.iov_offset = 0;
	iov_iter_advance(&wreq->io_iter, fsize);
	atomic64_set(&wreq->issued_to, fpos + fsize);

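	/* If the folio was trimmed at the EOF, nothing further can be
	 * contiguous with it, so issue the write that's been queued so far.
	 */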
	if (flen < fsize)
		netfs_issue_write(wreq, cache);

	_leave(" = 0");
	return 0;
}

/*
 * [DEPRECATED] Go through the buffer and write any folios that are marked with
 * the third mark to the cache.
 */
void netfs_pgpriv2_write_to_the_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *wreq;
	struct folio_queue *folioq;
	struct folio *folio;
	int error = 0;
	int slot = 0;

	_enter("");

	if (!fscache_resources_valid(&rreq->cache_resources))
		goto couldnt_start;

	/* Need the first folio to be able to set up the op. */
	for (folioq = rreq->buffer; folioq; folioq = folioq->next) {
		if (folioq->marks3) {
			slot = __ffs(folioq->marks3);
			break;
		}
	}
	if (!folioq)
		return;
	folio = folioq_folio(folioq, slot);

	wreq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(wreq)) {
		kleave(" [create %ld]", PTR_ERR(wreq));
		goto couldnt_start;
	}

	trace_netfs_write(wreq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);

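	/* Walk the marked folios, copying each one to the cache and clearing
	 * its third mark as we go.
	 */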
	for (;;) {
		error = netfs_pgpriv2_copy_folio(wreq, folio);
		if (error < 0)
			break;

		folioq_unmark3(folioq, slot);
		if (!folioq->marks3) {
			folioq = folioq->next;
			if (!folioq)
				break;
		}

		slot = __ffs(folioq->marks3);
		folio = folioq_folio(folioq, slot);
	}

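	/* Flush out anything still queued on the cache stream and mark the
	 * request as fully issued so that the collector can complete it.
	 */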
	netfs_issue_write(wreq, &wreq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &wreq->flags);

	netfs_put_request(wreq, false, netfs_rreq_trace_put_return);
	_leave(" = %d", error);
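	/* Deliberate fallthrough: clear PG_private_2 on any folios that never
	 * got queued for writing (a no-op if everything was copied).
	 */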
couldnt_start:
	netfs_pgpriv2_cancel(rreq->buffer);
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *wreq)
{
	struct folio_queue *folioq = wreq->buffer;
	unsigned long long collected_to = wreq->collected_to;
	unsigned int slot = wreq->buffer_head_slot;
	bool made_progress = false;

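	/* If the head slot index has run off the end of the current queue
	 * segment, that segment has been fully consumed and can be freed.
	 */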
	if (slot >= folioq_nr_slots(folioq)) {
		folioq = netfs_delete_buffer_head(wreq);
		slot = 0;
	}

	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      wreq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, wreq->i_size);

		trace_netfs_collect_folio(wreq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		wreq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			if (READ_ONCE(wreq->buffer_tail) == folioq)
				break;
			folioq = netfs_delete_buffer_head(wreq);
			slot = 0;
		}

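		/* Stop once the folio we've just cleaned reaches or passes the
		 * collection point.
		 */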
		if (fpos + fsize >= collected_to)
			break;
	}

	wreq->buffer = folioq;
	wreq->buffer_head_slot = slot;
	return made_progress;
}