xref: /linux/fs/netfs/read_pgpriv2.c (revision 1c6aa1121e7a5cd296d7038dbdd619da8bee1cd5)
// SPDX-License-Identifier: GPL-2.0-only
/* Read with PG_private_2 [DEPRECATED].
 *
 * Copyright (C) 2024 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include "internal.h"

/*
 * [DEPRECATED] Copy a folio to the cache with PG_private_2 set.
 */
static void netfs_pgpriv2_copy_folio(struct netfs_io_request *creq, struct folio *folio)
{
	struct netfs_io_stream *cache = &creq->io_streams[1];
	size_t fsize = folio_size(folio), flen = fsize;
	loff_t fpos = folio_pos(folio), i_size;
	bool to_eof = false;

	_enter("");

	/* netfs_perform_write() may shift i_size around the page or from out
	 * of the page to beyond it, but cannot move i_size into or through the
	 * page since we have it locked.
	 */
	i_size = i_size_read(creq->inode);

	if (fpos >= i_size) {
		/* mmap beyond eof. */
		_debug("beyond eof");
		folio_end_private_2(folio);
		return;
	}

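	/* If the folio extends beyond the copy request's current idea of the
	 * file size, bring the request's i_size up to the value sampled above.
	 */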
	if (fpos + fsize > creq->i_size)
		creq->i_size = i_size;

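	/* Trim the amount to be copied down to EOF and note whether this folio
	 * reaches or crosses the end of the file.
	 */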
	if (flen > i_size - fpos) {
		flen = i_size - fpos;
		to_eof = true;
	} else if (flen == i_size - fpos) {
		to_eof = true;
	}

	_debug("folio %zx %zx", flen, fsize);

	trace_netfs_folio(folio, netfs_folio_trace_store_copy);

	/* Attach the folio to the rolling buffer. */
	if (rolling_buffer_append(&creq->buffer, folio, 0) < 0) {
		clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &creq->flags);
		return;
	}

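	/* Set up the cache stream's submission window: write flen bytes from
	 * the start of the folio, allowing the write to be extended up to the
	 * full folio size (presumably so the cache can round the write up to
	 * its own block size).
	 */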
	cache->submit_extendable_to = fsize;
	cache->submit_off = 0;
	cache->submit_len = flen;

	/* Attach the folio to one or more subrequests.  For a big folio, we
	 * could end up with thousands of subrequests if the wsize is small -
	 * but we might need to wait during the creation of subrequests for
	 * network resources (eg. SMB credits).
	 */
	do {
		ssize_t part;

		creq->buffer.iter.iov_offset = cache->submit_off;

		atomic64_set(&creq->issued_to, fpos + cache->submit_off);
		cache->submit_extendable_to = fsize - cache->submit_off;
		part = netfs_advance_write(creq, cache, fpos + cache->submit_off,
					   cache->submit_len, to_eof);
		cache->submit_off += part;
		if (part > cache->submit_len)
			cache->submit_len = 0;
		else
			cache->submit_len -= part;
	} while (cache->submit_len > 0);

	creq->buffer.iter.iov_offset = 0;
	rolling_buffer_advance(&creq->buffer, fsize);
	atomic64_set(&creq->issued_to, fpos + fsize);

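	/* If the copy was trimmed short of the folio size because it crosses
	 * EOF, close out the write to the cache now; a following folio could
	 * not be contiguous with what has been issued anyway.
	 */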
	if (flen < fsize)
		netfs_issue_write(creq, cache);
}

/*
 * [DEPRECATED] Set up copying to the cache.
 */
static struct netfs_io_request *netfs_pgpriv2_begin_copy_to_cache(
	struct netfs_io_request *rreq, struct folio *folio)
{
	struct netfs_io_request *creq;

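	/* The read request must have cache resources attached or there is
	 * nowhere to copy the data to.
	 */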
	if (!fscache_resources_valid(&rreq->cache_resources))
		goto cancel;

	creq = netfs_create_write_req(rreq->mapping, NULL, folio_pos(folio),
				      NETFS_PGPRIV2_COPY_TO_CACHE);
	if (IS_ERR(creq))
		goto cancel;

	if (!creq->io_streams[1].avail)
		goto cancel_put;

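	/* Offload collection of the write results to the collector work item
	 * rather than doing it in the reading thread's context.
	 */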
	__set_bit(NETFS_RREQ_OFFLOAD_COLLECTION, &creq->flags);
	trace_netfs_copy2cache(rreq, creq);
	trace_netfs_write(creq, netfs_write_trace_copy_to_cache);
	netfs_stat(&netfs_n_wh_copy_to_cache);
	rreq->copy_to_cache = creq;
	return creq;

cancel_put:
	netfs_put_request(creq, netfs_rreq_trace_put_return);
cancel:
	rreq->copy_to_cache = ERR_PTR(-ENOBUFS);
	clear_bit(NETFS_RREQ_FOLIO_COPY_TO_CACHE, &rreq->flags);
	return ERR_PTR(-ENOBUFS);
}

/*
 * [DEPRECATED] Mark page as requiring copy-to-cache using PG_private_2 and add
 * it to the copy write request.
 */
void netfs_pgpriv2_copy_to_cache(struct netfs_io_request *rreq, struct folio *folio)
{
	struct netfs_io_request *creq = rreq->copy_to_cache;

	if (!creq)
		creq = netfs_pgpriv2_begin_copy_to_cache(rreq, folio);
	if (IS_ERR(creq))
		return;

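	/* Hold the folio with PG_private_2 whilst the copy is in flight; the
	 * mark is removed again once the folio has been written to the cache
	 * or the copy is abandoned.
	 */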
	trace_netfs_folio(folio, netfs_folio_trace_copy_to_cache);
	folio_start_private_2(folio);
	netfs_pgpriv2_copy_folio(creq, folio);
}

/*
 * [DEPRECATED] End writing to the cache, flushing out any outstanding writes.
 */
void netfs_pgpriv2_end_copy_to_cache(struct netfs_io_request *rreq)
{
	struct netfs_io_request *creq = rreq->copy_to_cache;

	if (IS_ERR_OR_NULL(creq))
		return;

	netfs_issue_write(creq, &creq->io_streams[1]);
	smp_wmb(); /* Write lists before ALL_QUEUED. */
	set_bit(NETFS_RREQ_ALL_QUEUED, &creq->flags);
	trace_netfs_rreq(rreq, netfs_rreq_trace_end_copy_to_cache);
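	/* If no subrequests were ever issued on the cache stream, nothing will
	 * complete and wake the collector, so wake it ourselves.
	 */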
	if (list_empty_careful(&creq->io_streams[1].subrequests))
		netfs_wake_collector(creq);

	netfs_put_request(creq, netfs_rreq_trace_put_return);
	rreq->copy_to_cache = NULL;
}

/*
 * [DEPRECATED] Remove the PG_private_2 mark from any folios we've finished
 * copying.
 */
bool netfs_pgpriv2_unlock_copied_folios(struct netfs_io_request *creq)
{
	struct folio_queue *folioq = creq->buffer.tail;
	unsigned long long collected_to = creq->collected_to;
	unsigned int slot = creq->buffer.first_tail_slot;
	bool made_progress = false;

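	/* If the first tail slot is beyond the end of its segment, that
	 * segment is already spent, so discard it before scanning.
	 */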
	if (slot >= folioq_nr_slots(folioq)) {
		folioq = rolling_buffer_delete_spent(&creq->buffer);
		slot = 0;
	}

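	/* Work through the folios in the buffer, clearing PG_private_2 on each
	 * one that has been written out in full and stopping at the first one
	 * that is still only partially collected.
	 */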
	for (;;) {
		struct folio *folio;
		unsigned long long fpos, fend;
		size_t fsize, flen;

		folio = folioq_folio(folioq, slot);
		if (WARN_ONCE(!folio_test_private_2(folio),
			      "R=%08x: folio %lx is not marked private_2\n",
			      creq->debug_id, folio->index))
			trace_netfs_folio(folio, netfs_folio_trace_not_under_wback);

		fpos = folio_pos(folio);
		fsize = folio_size(folio);
		flen = fsize;

		fend = min_t(unsigned long long, fpos + flen, creq->i_size);

		trace_netfs_collect_folio(creq, folio, fend, collected_to);

		/* Unlock any folio we've transferred all of. */
		if (collected_to < fend)
			break;

		trace_netfs_folio(folio, netfs_folio_trace_end_copy);
		folio_end_private_2(folio);
		creq->cleaned_to = fpos + fsize;
		made_progress = true;

		/* Clean up the head folioq.  If we clear an entire folioq, then
		 * we can get rid of it provided it's not also the tail folioq
		 * being filled by the issuer.
		 */
		folioq_clear(folioq, slot);
		slot++;
		if (slot >= folioq_nr_slots(folioq)) {
			folioq = rolling_buffer_delete_spent(&creq->buffer);
			if (!folioq)
				goto done;
			slot = 0;
		}

		if (fpos + fsize >= collected_to)
			break;
	}

	creq->buffer.tail = folioq;
done:
	creq->buffer.first_tail_slot = slot;
	return made_progress;
}
233