// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/read.c
 *
 * Block I/O for NFS
 *
 * Partial copy of Linus' read cache modifications to fs/nfs/file.c
 * modified for async RPC by okir@monad.swb.de
 */

#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagemap.h>
#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>

#include "nfs4_fs.h"
#include "internal.h"
#include "iostat.h"
#include "fscache.h"
#include "pnfs.h"
#include "nfstrace.h"
#include "delegation.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
static const struct nfs_rw_ops nfs_rw_read_ops;

static struct kmem_cache *nfs_rdata_cachep;

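/*
 * Allocate a zeroed read header from the nfs_rdata_cachep slab and
 * mark it for read mode. Returns NULL on allocation failure.
 */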
static struct nfs_pgio_header *nfs_readhdr_alloc(void)
{
	struct nfs_pgio_header *p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);

	if (p)
		p->rw_mode = FMODE_READ;
	return p;
}

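/*
 * Free a read header, releasing any scratch buffer attached by
 * nfs_read_alloc_scratch(). kfree() tolerates a NULL pointer.
 */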
static void nfs_readhdr_free(struct nfs_pgio_header *rhdr)
{
	kfree(rhdr->res.scratch);
	kmem_cache_free(nfs_rdata_cachep, rhdr);
}

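/*
 * Zero-fill an entire folio, mark it uptodate and unlock it without
 * issuing any I/O; used when the requested range lies beyond EOF.
 */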
static int nfs_return_empty_folio(struct folio *folio)
{
	folio_zero_segment(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	folio_unlock(folio);
	return 0;
}

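/*
 * Set up a pageio descriptor for reads. If a pNFS layout driver is
 * active (and force_mds is not set), its read ops are used in place of
 * the plain RPC ops; the transfer size is bounded by the server rsize.
 */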
void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
			      struct inode *inode, bool force_mds,
			      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_read_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_read_ops,
			server->rsize, 0);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_read);

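/*
 * Flush out any remaining requests on the descriptor, then account the
 * bytes and pages that were read. Reads are never mirrored, so only
 * mirror 0 is consulted.
 */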
void nfs_pageio_complete_read(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *pgm;
	unsigned long npages;

	nfs_pageio_complete(pgio);

	/* It doesn't make sense to do mirrored reads! */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	pgm = &pgio->pg_mirrors[0];
	NFS_I(pgio->pg_inode)->read_io += pgm->pg_bytes_written;
	npages = (pgm->pg_bytes_written + PAGE_SIZE - 1) >> PAGE_SHIFT;
	nfs_add_stats(pgio->pg_inode, NFSIOS_READPAGES, npages);
}

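/*
 * Fall back to reading through the MDS: drop any layout-driver state
 * and reinstall the plain RPC ops with the MDS rsize as the block size.
 */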
void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	/* read path should never have more than one mirror */
	WARN_ON_ONCE(pgio->pg_mirror_count != 1);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->rsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);

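/*
 * Attach a scratch buffer to the read header (used, for example, when
 * decoding READ_PLUS replies). The buffer is freed along with the
 * header in nfs_readhdr_free(). Returns true on success.
 */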
bool nfs_read_alloc_scratch(struct nfs_pgio_header *hdr, size_t size)
{
	WARN_ON(hdr->res.scratch != NULL);
	hdr->res.scratch = kmalloc(size, GFP_KERNEL);
	return hdr->res.scratch != NULL;
}
EXPORT_SYMBOL_GPL(nfs_read_alloc_scratch);

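/*
 * Release a read request. The folio is unlocked once the last request
 * in the page group has completed, unless the netfs layer is handling
 * the unlock itself.
 */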
static void nfs_readpage_release(struct nfs_page *req, int error)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (nfs_page_group_sync_on_bit(req, PG_UNLOCKPAGE))
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);

	nfs_release_request(req);
}

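/*
 * Mark the folio uptodate once every request in its page group has
 * been flagged PG_UPTODATE.
 */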
static void nfs_page_group_set_uptodate(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_UPTODATE))
		folio_mark_uptodate(nfs_page_to_folio(req));
}

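/*
 * Per-header completion: walk the request list, zero any tail of the
 * folio that the server did not return data for (short read at EOF),
 * propagate errors to the open context, and release each request.
 */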
static void nfs_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	int error;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct folio *folio = nfs_page_to_folio(req);
		unsigned long start = req->wb_pgbase;
		unsigned long end = req->wb_pgbase + req->wb_bytes;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			/* note: regions of the page not covered by a
			 * request are zeroed in nfs_read_add_folio
			 */
			if (bytes > hdr->good_bytes) {
				/* nothing in this request was good, so zero
				 * the full extent of the request */
				folio_zero_segment(folio, start, end);

			} else if (hdr->good_bytes - bytes < req->wb_bytes) {
				/* part of this request has good bytes, but
				 * not all. zero the bad bytes */
				start += hdr->good_bytes - bytes;
				WARN_ON(start < req->wb_pgbase);
				folio_zero_segment(folio, start, end);
			}
		}
		error = 0;
		bytes += req->wb_bytes;
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
			if (bytes <= hdr->good_bytes)
				nfs_page_group_set_uptodate(req);
			else {
				error = hdr->error;
				xchg(&nfs_req_openctx(req)->error, error);
			}
		} else
			nfs_page_group_set_uptodate(req);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
	nfs_netfs_read_completion(hdr);

out:
	hdr->release(hdr);
}

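/*
 * Fill in the RPC message for a READ call and emit the tracepoint;
 * the prepared task is then handed off to the RPC layer by the caller.
 */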
static void nfs_initiate_read(struct nfs_pgio_header *hdr,
			      struct rpc_message *msg,
			      const struct nfs_rpc_ops *rpc_ops,
			      struct rpc_task_setup *task_setup_data, int how)
{
	rpc_ops->read_setup(hdr, msg);
	nfs_netfs_initiate_read(hdr);
	trace_nfs_initiate_read(hdr);
}

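/*
 * Error cleanup for the async read path: release every queued request
 * with the given error.
 */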
static void
nfs_async_read_error(struct list_head *head, int error)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_readpage_release(req, error);
	}
}

const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
	.error_cleanup = nfs_async_read_error,
	.completion = nfs_read_completion,
};

/*
 * This is the callback from RPC telling us whether a reply was
 * received or some error occurred (timeout or socket shutdown).
 */
static int nfs_readpage_done(struct rpc_task *task,
			     struct nfs_pgio_header *hdr,
			     struct inode *inode)
{
	int status = NFS_PROTO(inode)->read_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, hdr->res.count);
	trace_nfs_readpage_done(task, hdr);

	if (task->tk_status == -ESTALE) {
		nfs_set_inode_stale(inode);
		nfs_mark_for_revalidate(inode);
	}
	return 0;
}

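/*
 * Handle a short read: if the server returned no data at all, fail the
 * header with -EIO; for non-RPC layout drivers, request a retry through
 * the MDS; otherwise advance the arguments past the bytes already
 * received and restart the RPC call for the remainder.
 */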
static void nfs_readpage_retry(struct rpc_task *task,
			       struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res  *resp = &hdr->res;

	/* This is a short read! */
	nfs_inc_stats(hdr->inode, NFSIOS_SHORTREAD);
	trace_nfs_readpage_short(task, hdr);

	/* Has the server at least made some progress? */
	if (resp->count == 0) {
		nfs_set_pgio_error(hdr, -EIO, argp->offset);
		return;
	}

	/* For non rpc-based layout drivers, retry-through-MDS */
	if (!task->tk_ops) {
		hdr->pnfs_error = -EAGAIN;
		return;
	}

	/* Yes, so retry the read at the end of the hdr */
	hdr->mds_offset += resp->count;
	argp->offset += resp->count;
	argp->pgbase += resp->count;
	argp->count -= resp->count;
	resp->count = 0;
	resp->eof = 0;
	rpc_restart_call_prepare(task);
}

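/*
 * Post-process a completed READ: on EOF, trim good_bytes to the data
 * actually returned and record the EOF so the rest of the folio can be
 * zeroed; otherwise treat a short reply as a retryable condition.
 */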
static void nfs_readpage_result(struct rpc_task *task,
				struct nfs_pgio_header *hdr)
{
	if (hdr->res.eof) {
		loff_t pos = hdr->args.offset + hdr->res.count;
		unsigned int new = pos - hdr->io_start;

		if (hdr->good_bytes > new) {
			hdr->good_bytes = new;
			set_bit(NFS_IOHDR_EOF, &hdr->flags);
			clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
		}
	} else if (hdr->res.count < hdr->args.count)
		nfs_readpage_retry(task, hdr);
}

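/*
 * Create a read request for @folio and queue it on the pageio
 * descriptor. The request length is rounded up to a multiple of rsize
 * (capped at the folio size), and any part of the folio beyond the
 * current file length is zeroed here. A folio lying entirely beyond
 * EOF is zero-filled and returned without any I/O.
 */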
int nfs_read_add_folio(struct nfs_pageio_descriptor *pgio,
		       struct nfs_open_context *ctx,
		       struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_server *server = NFS_SERVER(inode);
	size_t fsize = folio_size(folio);
	unsigned int rsize = server->rsize;
	struct nfs_page *new;
	unsigned int len, aligned_len;
	int error;

	len = nfs_folio_length(folio);
	if (len == 0)
		return nfs_return_empty_folio(folio);

	aligned_len = min_t(unsigned int, ALIGN(len, rsize), fsize);

	new = nfs_page_create_from_folio(ctx, folio, 0, aligned_len);
	if (IS_ERR(new)) {
		error = PTR_ERR(new);
		if (nfs_netfs_folio_unlock(folio))
			folio_unlock(folio);
		goto out;
	}

	if (len < fsize)
		folio_zero_segment(folio, len, fsize);
	if (!nfs_pageio_add_request(pgio, new)) {
		nfs_list_remove_request(new);
		error = pgio->pg_error;
		nfs_readpage_release(new, error);
		goto out;
	}
	return 0;
out:
	return error;
}

/*
 * Actually read a folio over the wire.
 */
static int nfs_do_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	int ret;

	ctx = get_nfs_open_context(nfs_file_open_context(file));

	xchg(&ctx->error, 0);
	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	ret = nfs_read_add_folio(&pgio, ctx, folio);
	if (ret)
		goto out_put;

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);
	if (pgio.pg_error < 0) {
		ret = pgio.pg_error;
		goto out_put;
	}

	ret = folio_wait_locked_killable(folio);
	if (!folio_test_uptodate(folio) && !ret)
		ret = xchg(&ctx->error, 0);

out_put:
	put_nfs_open_context(ctx);
	return ret;
}

/*
 * Synchronously read a folio.
 *
 * This is not heavily used, as most users will try an asynchronous
 * large read through ->readahead first.
 */
int nfs_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = file_inode(file);
	loff_t pos = folio_pos(folio);
	size_t len = folio_size(folio);
	int ret;

	trace_nfs_aop_readpage(inode, pos, len);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGE);
	task_io_account_read(len);

	/*
	 * Try to flush any pending writes to the file.
	 *
	 * NOTE! Because we own the folio lock, there cannot
	 * be any new pending writes generated at this point
	 * for this folio (other folios can be written to).
	 */
	ret = nfs_wb_folio(inode, folio);
	if (ret)
		goto out_unlock;
	if (folio_test_uptodate(folio))
		goto out_unlock;

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out_unlock;

	ret = nfs_netfs_read_folio(file, folio);
	if (ret)
		ret = nfs_do_read_folio(file, folio);
out:
	trace_nfs_aop_readpage_done(inode, pos, len, ret);
	return ret;
out_unlock:
	folio_unlock(folio);
	goto out;
}

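/*
 * ->readahead: batch all folios handed over by the VM onto one pageio
 * descriptor so they can be coalesced into large on-the-wire READs.
 * Used when the netfs (fscache) path returns an error; when no struct
 * file is available, an open context is looked up on the inode instead.
 */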
void nfs_readahead(struct readahead_control *ractl)
{
	struct nfs_pageio_descriptor pgio;
	struct nfs_open_context *ctx;
	unsigned int nr_pages = readahead_count(ractl);
	struct file *file = ractl->file;
	struct inode *inode = ractl->mapping->host;
	struct folio *folio;
	int ret;

	trace_nfs_aop_readahead(inode, readahead_pos(ractl), nr_pages);
	nfs_inc_stats(inode, NFSIOS_VFSREADPAGES);
	task_io_account_read(readahead_length(ractl));

	ret = -ESTALE;
	if (NFS_STALE(inode))
		goto out;

	ret = nfs_netfs_readahead(ractl);
	if (!ret)
		goto out;

	if (file == NULL) {
		ret = -EBADF;
		ctx = nfs_find_open_context(inode, NULL, FMODE_READ);
		if (ctx == NULL)
			goto out;
	} else
		ctx = get_nfs_open_context(nfs_file_open_context(file));

	nfs_pageio_init_read(&pgio, inode, false,
			     &nfs_async_read_completion_ops);

	while ((folio = readahead_folio(ractl)) != NULL) {
		ret = nfs_read_add_folio(&pgio, ctx, folio);
		if (ret)
			break;
	}

	nfs_pageio_complete_read(&pgio);
	nfs_update_delegated_atime(inode);

	put_nfs_open_context(ctx);
out:
	trace_nfs_aop_readahead_done(inode, nr_pages, ret);
}

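/*
 * Create and destroy the slab cache backing nfs_pgio_header
 * allocations for reads; called at NFS module init and exit.
 */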
int __init nfs_init_readpagecache(void)
{
	nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_rdata_cachep == NULL)
		return -ENOMEM;

	return 0;
}

void nfs_destroy_readpagecache(void)
{
	kmem_cache_destroy(nfs_rdata_cachep);
}

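/*
 * Read-side hooks handed to the generic nfs_pgio machinery via
 * nfs_pageio_init_read().
 */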
static const struct nfs_rw_ops nfs_rw_read_ops = {
	.rw_alloc_header	= nfs_readhdr_alloc,
	.rw_free_header		= nfs_readhdr_free,
	.rw_done		= nfs_readpage_done,
	.rw_result		= nfs_readpage_result,
	.rw_initiate		= nfs_initiate_read,
};
477