// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

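/*
 * Allocate a zeroed read header from the slab cache and mark it for
 * read-side I/O. Freed (together with any scratch buffer) by
 * nfs_readhdr_free(). Returns NULL on allocation failure.
 */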
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kfree(rhdr->res.scratch);
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

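/*
 * Handle a folio that covers no file data: zero it in its entirety,
 * mark it up to date and unlock it without issuing any I/O.
 */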
static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

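/*
 * Initialise a pageio descriptor for reads. If a pNFS layout driver is
 * attached and I/O through the MDS is not being forced, use the layout
 * driver's read ops; otherwise fall back to the plain RPC read ops.
 * Requests are sized to the server's negotiated rsize.
 */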
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

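/*
 * Finish issuing the reads described by @pgio and fold the bytes and
 * pages read into the inode's statistics. Reads are never mirrored, so
 * only the first (and only) mirror is consulted.
 */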
void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

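/*
 * Fall back to I/O through the MDS: undo any layout-driver state,
 * restore the plain RPC read ops and reset the single read mirror's
 * block size to the server's rsize.
 */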
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

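/*
 * Attach a scratch buffer to the read reply; it is freed together with
 * the header in nfs_readhdr_free(). Returns true on success.
 */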
bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
	WARN_ON(hdr->res.scratch != NULL);
	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
	return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);

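/*
 * Release a read request. Once every request in the page group has
 * passed through here, unlock the folio (unless netfs is responsible
 * for unlocking it), then drop the request's reference.
 */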
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);

	nfs_release_request(req);
}

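/*
 * Mark the folio up to date once PG_UPTODATE has been set on every
 * request in the page group, i.e. once all of it has been read in.
 */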
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}

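/*
 * Completion handler for a read header: for each finished request,
 * zero whatever lies beyond the reply's good bytes when the server
 * reported EOF, mark fully read page groups up to date, record any
 * error in the open context, and release the request.
 */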
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_read_add_folio
			 */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
	nfs_netfs_read_completion(hdr);

out:
	hdr->release(hdr);
}

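/*
 * Set up the READ RPC message and notify netfs and tracing that a read
 * is about to be initiated; the actual RPC submission is done by the
 * caller in the generic pgio code.
 */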
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	nfs_netfs_initiate_read(hdr);
	trace_nfs_initiate_read(hdr);
}

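/*
 * Error cleanup: release every request still queued on @head with
 * @error.
 */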
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

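/*
 * Handle a short read. If the server made no progress at all, fail the
 * I/O with -EIO. Layout drivers that are not RPC-based are asked to
 * retry through the MDS. Otherwise, advance the arguments past the
 * bytes already received and restart the RPC to read the remainder.
 */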
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non-RPC-based layout drivers, retry through the MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* The server made some progress, so retry the rest of the read */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

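/*
 * Post-process the result of a READ. On EOF, trim the header's good
 * byte count to what the server actually returned and flag the EOF so
 * that the rest of the folio can be zeroed; otherwise treat a short
 * reply as a retryable short read.
 */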
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

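/*
 * Build a read request for @folio and feed it to the pageio descriptor.
 * The request length is rounded up to the server's rsize but capped at
 * the folio size, and any part of the folio beyond the file's data is
 * zeroed here. On failure the folio is unlocked (unless netfs owns the
 * unlock) and an error is returned.
 */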
int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
		       struct nfs_open_context *ctx,
		       struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);
		goto out;
	}

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(pgio, new)) {
		nfs_list_remove_request(new);
		error = pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out:
	return error;
}

/*
 * Actually read a folio over the wire.
 */
static int nfs_do_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	int ret;

	ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&ctx->error, 0);
	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = nfs_read_add_folio(&pgio, ctx, folio);
	if (ret)
		goto out_put;

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);
	if (pgio.pg_error < 0) {
		ret = pgio.pg_error;
		goto out_put;
	}

	ret = folio_wait_locked_killable(folio);
	if (!folio_test_uptodate(folio) && !ret)
		ret = xchg(&ctx->error, 0);

out_put:
	put_nfs_open_context(ctx);
	return ret;
}

/*
 * Synchronously read a folio.
 *
 * This is not heavily used as most users try an asynchronous large
 * read through ->readahead first.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	loff_t pos = folio_pos(folio);
	size_t len = folio_size(folio);
	int ret;

	trace_nfs_aop_readpage(inode, pos, len);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(len);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	ret = nfs_netfs_read_folio(file, folio);
	if (ret)
		ret = nfs_do_read_folio(file, folio);
out:
	trace_nfs_aop_readpage_done(inode, pos, len, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	goto out;
}

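/*
 * Address space ->readahead. Let netfs (fscache) handle the window if
 * it can; otherwise queue an asynchronous read for each folio, using
 * the file's open context or, for a file-less readahead, any open read
 * context on the inode.
 */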
void nfs_readahead(struct readahead_control *ractl)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	ret = nfs_netfs_readahead(ractl);
	if (!ret)
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = nfs_read_add_folio(&pgio, ctx, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);

	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};
476