xref: /linux/fs/nfs/direct.c (revision 86f5536004a61a0c797c14a248fc976f03f55cd5)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/fs/nfs/direct.c
4  *
5  * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
6  *
7  * High-performance uncached I/O for the Linux NFS client
8  *
9  * There are important applications whose performance or correctness
10  * depends on uncached access to file data.  Database clusters
11  * (multiple copies of the same instance running on separate hosts)
12  * implement their own cache coherency protocol that subsumes file
13  * system cache protocols.  Applications that process datasets
14  * considerably larger than the client's memory do not always benefit
15  * from a local cache.  A streaming video server, for instance, has no
16  * need to cache the contents of a file.
17  *
18  * When an application requests uncached I/O, all read and write requests
19  * are made directly to the server; data stored or fetched via these
20  * requests is not cached in the Linux page cache.  The client does not
21  * correct unaligned requests from applications.  All requested bytes are
22  * held on permanent storage before a direct write system call returns to
23  * an application.
24  *
25  * Solaris implements an uncached I/O facility called directio() that
26  * is used for backups and sequential I/O to very large files.  Solaris
27  * also supports uncaching whole NFS partitions with "-o forcedirectio,"
28  * an undocumented mount option.
29  *
30  * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
31  * help from Andrew Morton.
32  *
33  * 18 Dec 2001	Initial implementation for 2.4  --cel
34  * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
35  * 08 Jun 2003	Port to 2.5 APIs  --cel
36  * 31 Mar 2004	Handle direct I/O without VFS support  --cel
37  * 15 Sep 2004	Parallel async reads  --cel
38  * 04 May 2005	support O_DIRECT with aio  --cel
39  *
40  */
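
/*
 * A rough userspace sketch of the semantics described above (the path,
 * buffer size, and alignment here are purely illustrative; NFS direct I/O
 * does not itself require aligned buffers):
 *
 *	#define _GNU_SOURCE
 *	#include <fcntl.h>
 *	#include <stdlib.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		void *buf;
 *		int fd = open("/mnt/nfs/data.bin", O_RDWR | O_DIRECT);
 *
 *		posix_memalign(&buf, 4096, 4096);
 *		memset(buf, 'x', 4096);
 *		pwrite(fd, buf, 4096, 0);	// durable on the server before this returns
 *		close(fd);
 *		return 0;
 *	}
 */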
41 
42 #include <linux/errno.h>
43 #include <linux/sched.h>
44 #include <linux/kernel.h>
45 #include <linux/file.h>
46 #include <linux/pagemap.h>
47 #include <linux/kref.h>
48 #include <linux/slab.h>
49 #include <linux/task_io_accounting_ops.h>
50 #include <linux/module.h>
51 
52 #include <linux/nfs_fs.h>
53 #include <linux/nfs_page.h>
54 #include <linux/sunrpc/clnt.h>
55 
56 #include <linux/uaccess.h>
57 #include <linux/atomic.h>
58 
59 #include "internal.h"
60 #include "iostat.h"
61 #include "pnfs.h"
62 #include "fscache.h"
63 #include "nfstrace.h"
64 
65 #define NFSDBG_FACILITY		NFSDBG_VFS
66 
67 static struct kmem_cache *nfs_direct_cachep;
68 
69 static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
70 static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
71 static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
72 static void nfs_direct_write_schedule_work(struct work_struct *work);
73 
74 static inline void get_dreq(struct nfs_direct_req *dreq)
75 {
76 	atomic_inc(&dreq->io_count);
77 }
78 
79 static inline int put_dreq(struct nfs_direct_req *dreq)
80 {
81 	return atomic_dec_and_test(&dreq->io_count);
82 }
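
/*
 * dreq->io_count tracks outstanding I/O on a direct request: the schedule
 * functions below take one reference up front with get_dreq(), each pgio
 * header takes another in nfs_direct_pgio_init(), and every completion
 * drops one with put_dreq().  Whoever drops the last reference calls
 * nfs_direct_complete() (or nfs_direct_write_complete() for writes).
 */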
83 
84 static void
85 nfs_direct_handle_truncated(struct nfs_direct_req *dreq,
86 			    const struct nfs_pgio_header *hdr,
87 			    ssize_t dreq_len)
88 {
89 	if (!(test_bit(NFS_IOHDR_ERROR, &hdr->flags) ||
90 	      test_bit(NFS_IOHDR_EOF, &hdr->flags)))
91 		return;
92 	if (dreq->max_count >= dreq_len) {
93 		dreq->max_count = dreq_len;
94 		if (dreq->count > dreq_len)
95 			dreq->count = dreq_len;
96 	}
97 
98 	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && !dreq->error)
99 		dreq->error = hdr->error;
100 }
101 
102 static void
103 nfs_direct_count_bytes(struct nfs_direct_req *dreq,
104 		       const struct nfs_pgio_header *hdr)
105 {
106 	loff_t hdr_end = hdr->io_start + hdr->good_bytes;
107 	ssize_t dreq_len = 0;
108 
109 	if (hdr_end > dreq->io_start)
110 		dreq_len = hdr_end - dreq->io_start;
111 
112 	nfs_direct_handle_truncated(dreq, hdr, dreq_len);
113 
114 	if (dreq_len > dreq->max_count)
115 		dreq_len = dreq->max_count;
116 
117 	if (dreq->count < dreq_len)
118 		dreq->count = dreq_len;
119 }
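
/*
 * Worked example of the accounting above (numbers are illustrative): with
 * dreq->io_start = 0 and dreq->max_count = 16384, a header covering
 * hdr->io_start = 8192 with good_bytes = 4096 yields hdr_end = 12288 and
 * dreq_len = 12288.  If that header also has NFS_IOHDR_EOF or
 * NFS_IOHDR_ERROR set, nfs_direct_handle_truncated() clamps max_count
 * (and, if necessary, count) down to 12288 so that later headers cannot
 * extend the result past the short transfer.
 */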
120 
121 static void nfs_direct_truncate_request(struct nfs_direct_req *dreq,
122 					struct nfs_page *req)
123 {
124 	loff_t offs = req_offset(req);
125 	size_t req_start = (size_t)(offs - dreq->io_start);
126 
127 	if (req_start < dreq->max_count)
128 		dreq->max_count = req_start;
129 	if (req_start < dreq->count)
130 		dreq->count = req_start;
131 }
132 
133 /**
134  * nfs_swap_rw - NFS address space operation for swap I/O
135  * @iocb: target I/O control block
136  * @iter: I/O buffer
137  *
138  * Perform I/O to the swap file.  This is much like direct I/O.
139  */
140 int nfs_swap_rw(struct kiocb *iocb, struct iov_iter *iter)
141 {
142 	ssize_t ret;
143 
144 	if (iov_iter_rw(iter) == READ)
145 		ret = nfs_file_direct_read(iocb, iter, true);
146 	else
147 		ret = nfs_file_direct_write(iocb, iter, true);
148 	if (ret < 0)
149 		return ret;
150 	return 0;
151 }
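
/*
 * nfs_swap_rw() is not called from this file; it is wired up as the
 * ->swap_rw address_space operation (see nfs_file_aops in fs/nfs/file.c),
 * so the swap code reaches it roughly as:
 *
 *	mapping->a_ops->swap_rw(iocb, iter);
 */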
152 
153 static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
154 {
155 	unsigned int i;
156 	for (i = 0; i < npages; i++)
157 		put_page(pages[i]);
158 }
159 
160 void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
161 			      struct nfs_direct_req *dreq)
162 {
163 	cinfo->inode = dreq->inode;
164 	cinfo->mds = &dreq->mds_cinfo;
165 	cinfo->ds = &dreq->ds_cinfo;
166 	cinfo->dreq = dreq;
167 	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
168 }
169 
170 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
171 {
172 	struct nfs_direct_req *dreq;
173 
174 	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
175 	if (!dreq)
176 		return NULL;
177 
178 	kref_init(&dreq->kref);
179 	kref_get(&dreq->kref);
180 	init_completion(&dreq->completion);
181 	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
182 	pnfs_init_ds_commit_info(&dreq->ds_cinfo);
183 	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
184 	spin_lock_init(&dreq->lock);
185 
186 	return dreq;
187 }
188 
189 static void nfs_direct_req_free(struct kref *kref)
190 {
191 	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
192 
193 	pnfs_release_ds_info(&dreq->ds_cinfo, dreq->inode);
194 	if (dreq->l_ctx != NULL)
195 		nfs_put_lock_context(dreq->l_ctx);
196 	if (dreq->ctx != NULL)
197 		put_nfs_open_context(dreq->ctx);
198 	kmem_cache_free(nfs_direct_cachep, dreq);
199 }
200 
201 static void nfs_direct_req_release(struct nfs_direct_req *dreq)
202 {
203 	kref_put(&dreq->kref, nfs_direct_req_free);
204 }
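
/*
 * nfs_direct_req_alloc() hands back the dreq with a kref count of two: one
 * reference belongs to the I/O path and is dropped in nfs_direct_complete()
 * (or by the schedule functions when nothing was dispatched), the other
 * belongs to the caller and is dropped at the out_release labels in
 * nfs_file_direct_read() and nfs_file_direct_write().
 */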
205 
206 ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq, loff_t offset)
207 {
208 	loff_t start = offset - dreq->io_start;
209 	return dreq->max_count - start;
210 }
211 EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);
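
/*
 * Example: with dreq->io_start = 4096 and dreq->max_count = 8192, an offset
 * of 6144 leaves 8192 - (6144 - 4096) = 6144 bytes of the direct request.
 */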
212 
213 /*
214  * Collects and returns the final error value/byte-count.
215  */
216 static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
217 {
218 	ssize_t result = -EIOCBQUEUED;
219 
220 	/* Async requests don't wait here */
221 	if (dreq->iocb)
222 		goto out;
223 
224 	result = wait_for_completion_killable(&dreq->completion);
225 
226 	if (!result) {
227 		result = dreq->count;
228 		WARN_ON_ONCE(dreq->count < 0);
229 	}
230 	if (!result)
231 		result = dreq->error;
232 
233 out:
234 	return (ssize_t) result;
235 }
236 
237 /*
238  * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust that
239  * the iocb is still valid here if this is a synchronous request.
240  */
241 static void nfs_direct_complete(struct nfs_direct_req *dreq)
242 {
243 	struct inode *inode = dreq->inode;
244 
245 	inode_dio_end(inode);
246 
247 	if (dreq->iocb) {
248 		long res = (long) dreq->error;
249 		if (dreq->count != 0) {
250 			res = (long) dreq->count;
251 			WARN_ON_ONCE(dreq->count < 0);
252 		}
253 		dreq->iocb->ki_complete(dreq->iocb, res);
254 	}
255 
256 	complete(&dreq->completion);
257 
258 	nfs_direct_req_release(dreq);
259 }
260 
261 static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
262 {
263 	unsigned long bytes = 0;
264 	struct nfs_direct_req *dreq = hdr->dreq;
265 
266 	spin_lock(&dreq->lock);
267 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
268 		spin_unlock(&dreq->lock);
269 		goto out_put;
270 	}
271 
272 	nfs_direct_count_bytes(dreq, hdr);
273 	spin_unlock(&dreq->lock);
274 
275 	while (!list_empty(&hdr->pages)) {
276 		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
277 		struct page *page = req->wb_page;
278 
279 		if (!PageCompound(page) && bytes < hdr->good_bytes &&
280 		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
281 			set_page_dirty(page);
282 		bytes += req->wb_bytes;
283 		nfs_list_remove_request(req);
284 		nfs_release_request(req);
285 	}
286 out_put:
287 	if (put_dreq(dreq))
288 		nfs_direct_complete(dreq);
289 	hdr->release(hdr);
290 }
291 
292 static void nfs_read_sync_pgio_error(struct list_head *head, int error)
293 {
294 	struct nfs_page *req;
295 
296 	while (!list_empty(head)) {
297 		req = nfs_list_entry(head->next);
298 		nfs_list_remove_request(req);
299 		nfs_release_request(req);
300 	}
301 }
302 
303 static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
304 {
305 	get_dreq(hdr->dreq);
306 	set_bit(NFS_IOHDR_ODIRECT, &hdr->flags);
307 }
308 
309 static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
310 	.error_cleanup = nfs_read_sync_pgio_error,
311 	.init_hdr = nfs_direct_pgio_init,
312 	.completion = nfs_direct_read_completion,
313 };
314 
315 /*
316  * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
317  * operation.  If iov_iter_get_pages_alloc2() or nfs_page_create_from_page()
318  * fails, bail and stop sending more reads.  Read length accounting is
319  * handled automatically by nfs_direct_read_completion().  If no requests
320  * have been sent at all, just return an error.
321  */
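
/*
 * Worked example of the chunking below (values are illustrative): with a
 * 4K page size, a call to iov_iter_get_pages_alloc2() that returns
 * result = 10240 bytes starting at pgbase = 512 covers
 * npages = (10240 + 512 + 4095) / 4096 = 3 pages.  The first nfs_page then
 * spans PAGE_SIZE - 512 = 3584 bytes, the second a full 4096, and the third
 * the remaining 2560 (pgbase is zeroed after the first page of each chunk).
 */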
322 
323 static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
324 					      struct iov_iter *iter,
325 					      loff_t pos)
326 {
327 	struct nfs_pageio_descriptor desc;
328 	struct inode *inode = dreq->inode;
329 	ssize_t result = -EINVAL;
330 	size_t requested_bytes = 0;
331 	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);
332 
333 	nfs_pageio_init_read(&desc, dreq->inode, false,
334 			     &nfs_direct_read_completion_ops);
335 	get_dreq(dreq);
336 	desc.pg_dreq = dreq;
337 	inode_dio_begin(inode);
338 
339 	while (iov_iter_count(iter)) {
340 		struct page **pagevec;
341 		size_t bytes;
342 		size_t pgbase;
343 		unsigned npages, i;
344 
345 		result = iov_iter_get_pages_alloc2(iter, &pagevec,
346 						  rsize, &pgbase);
347 		if (result < 0)
348 			break;
349 
350 		bytes = result;
351 		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
352 		for (i = 0; i < npages; i++) {
353 			struct nfs_page *req;
354 			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
355 			/* XXX do we need to do the eof zeroing found in async_filler? */
356 			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
357 							pgbase, pos, req_len);
358 			if (IS_ERR(req)) {
359 				result = PTR_ERR(req);
360 				break;
361 			}
362 			if (!nfs_pageio_add_request(&desc, req)) {
363 				result = desc.pg_error;
364 				nfs_release_request(req);
365 				break;
366 			}
367 			pgbase = 0;
368 			bytes -= req_len;
369 			requested_bytes += req_len;
370 			pos += req_len;
371 		}
372 		nfs_direct_release_pages(pagevec, npages);
373 		kvfree(pagevec);
374 		if (result < 0)
375 			break;
376 	}
377 
378 	nfs_pageio_complete(&desc);
379 
380 	/*
381 	 * If no bytes were started, return the error, and let the
382 	 * generic layer handle the completion.
383 	 */
384 	if (requested_bytes == 0) {
385 		inode_dio_end(inode);
386 		nfs_direct_req_release(dreq);
387 		return result < 0 ? result : -EIO;
388 	}
389 
390 	if (put_dreq(dreq))
391 		nfs_direct_complete(dreq);
392 	return requested_bytes;
393 }
394 
395 /**
396  * nfs_file_direct_read - file direct read operation for NFS files
397  * @iocb: target I/O control block
398  * @iter: vector of user buffers into which to read data
399  * @swap: flag indicating this is swap IO, not O_DIRECT IO
400  *
401  * We use this function for direct reads instead of calling
402  * generic_file_read_iter() in order to avoid its check to see if
403  * the request starts before the end of the file.  For that check
404  * to work, we must generate a GETATTR before each direct read, and
405  * even then there is a window between the GETATTR and the subsequent
406  * READ where the file size could change.  Our preference is simply
407  * to do all reads the application wants, and the server will take
408  * care of managing the end of file boundary.
409  *
410  * This function also avoids unnecessarily updating the file's atime
411  * locally, as the NFS server sets the file's atime, and this
412  * client must read the updated atime from the server back into its
413  * cache.
414  */
415 ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter,
416 			     bool swap)
417 {
418 	struct file *file = iocb->ki_filp;
419 	struct address_space *mapping = file->f_mapping;
420 	struct inode *inode = mapping->host;
421 	struct nfs_direct_req *dreq;
422 	struct nfs_lock_context *l_ctx;
423 	ssize_t result, requested;
424 	size_t count = iov_iter_count(iter);
425 	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);
426 
427 	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
428 		file, count, (long long) iocb->ki_pos);
429 
430 	result = 0;
431 	if (!count)
432 		goto out;
433 
434 	task_io_account_read(count);
435 
436 	result = -ENOMEM;
437 	dreq = nfs_direct_req_alloc();
438 	if (dreq == NULL)
439 		goto out;
440 
441 	dreq->inode = inode;
442 	dreq->max_count = count;
443 	dreq->io_start = iocb->ki_pos;
444 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
445 	l_ctx = nfs_get_lock_context(dreq->ctx);
446 	if (IS_ERR(l_ctx)) {
447 		result = PTR_ERR(l_ctx);
448 		nfs_direct_req_release(dreq);
449 		goto out_release;
450 	}
451 	dreq->l_ctx = l_ctx;
452 	if (!is_sync_kiocb(iocb))
453 		dreq->iocb = iocb;
454 
455 	if (user_backed_iter(iter))
456 		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;
457 
458 	if (!swap) {
459 		result = nfs_start_io_direct(inode);
460 		if (result) {
461 			/* release the reference that would usually be
462 			 * consumed by nfs_direct_read_schedule_iovec()
463 			 */
464 			nfs_direct_req_release(dreq);
465 			goto out_release;
466 		}
467 	}
468 
469 	NFS_I(inode)->read_io += count;
470 	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);
471 
472 	if (!swap)
473 		nfs_end_io_direct(inode);
474 
475 	if (requested > 0) {
476 		result = nfs_direct_wait(dreq);
477 		if (result > 0) {
478 			requested -= result;
479 			iocb->ki_pos += result;
480 		}
481 		iov_iter_revert(iter, requested);
482 	} else {
483 		result = requested;
484 	}
485 
486 out_release:
487 	nfs_direct_req_release(dreq);
488 out:
489 	return result;
490 }
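
/*
 * Typical entry point: nfs_file_read() in fs/nfs/file.c routes O_DIRECT
 * reads here, along the lines of (a sketch, not quoted verbatim):
 *
 *	if (iocb->ki_flags & IOCB_DIRECT)
 *		return nfs_file_direct_read(iocb, to, false);
 */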
491 
492 static void nfs_direct_add_page_head(struct list_head *list,
493 				     struct nfs_page *req)
494 {
495 	struct nfs_page *head = req->wb_head;
496 
497 	if (!list_empty(&head->wb_list) || !nfs_lock_request(head))
498 		return;
499 	if (!list_empty(&head->wb_list)) {
500 		nfs_unlock_request(head);
501 		return;
502 	}
503 	list_add(&head->wb_list, list);
504 	kref_get(&head->wb_kref);
505 	kref_get(&head->wb_kref);
506 }
507 
508 static void nfs_direct_join_group(struct list_head *list,
509 				  struct nfs_commit_info *cinfo,
510 				  struct inode *inode)
511 {
512 	struct nfs_page *req, *subreq;
513 
514 	list_for_each_entry(req, list, wb_list) {
515 		if (req->wb_head != req) {
516 			nfs_direct_add_page_head(&req->wb_list, req);
517 			continue;
518 		}
519 		subreq = req->wb_this_page;
520 		if (subreq == req)
521 			continue;
522 		do {
523 			/*
524 			 * Remove subrequests from this list before freeing
525 			 * them in the call to nfs_join_page_group().
526 			 */
527 			if (!list_empty(&subreq->wb_list)) {
528 				nfs_list_remove_request(subreq);
529 				nfs_release_request(subreq);
530 			}
531 		} while ((subreq = subreq->wb_this_page) != req);
532 		nfs_join_page_group(req, cinfo, inode);
533 	}
534 }
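
/*
 * Background for the loop above: when parts of one page were written by
 * separate sub-requests, those sub-requests form a ring linked through
 * wb_this_page, with each member's wb_head pointing at the ring's head.
 * nfs_direct_join_group() locks the head, drops the sub-requests from the
 * local list, and lets nfs_join_page_group() merge them back into a single
 * request covering the whole group.
 */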
535 
536 static void
537 nfs_direct_write_scan_commit_list(struct inode *inode,
538 				  struct list_head *list,
539 				  struct nfs_commit_info *cinfo)
540 {
541 	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
542 	pnfs_recover_commit_reqs(list, cinfo);
543 	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
544 	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
545 }
546 
547 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
548 {
549 	struct nfs_pageio_descriptor desc;
550 	struct nfs_page *req;
551 	LIST_HEAD(reqs);
552 	struct nfs_commit_info cinfo;
553 
554 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
555 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
556 
557 	nfs_direct_join_group(&reqs, &cinfo, dreq->inode);
558 
559 	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
560 	get_dreq(dreq);
561 
562 	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
563 			      &nfs_direct_write_completion_ops);
564 	desc.pg_dreq = dreq;
565 
566 	while (!list_empty(&reqs)) {
567 		req = nfs_list_entry(reqs.next);
568 		/* Bump the transmission count */
569 		req->wb_nio++;
570 		if (!nfs_pageio_add_request(&desc, req)) {
571 			spin_lock(&dreq->lock);
572 			if (dreq->error < 0) {
573 				desc.pg_error = dreq->error;
574 			} else if (desc.pg_error != -EAGAIN) {
575 				dreq->flags = 0;
576 				if (!desc.pg_error)
577 					desc.pg_error = -EIO;
578 				dreq->error = desc.pg_error;
579 			} else
580 				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
581 			spin_unlock(&dreq->lock);
582 			break;
583 		}
584 		nfs_release_request(req);
585 	}
586 	nfs_pageio_complete(&desc);
587 
588 	while (!list_empty(&reqs)) {
589 		req = nfs_list_entry(reqs.next);
590 		nfs_list_remove_request(req);
591 		nfs_unlock_and_release_request(req);
592 		if (desc.pg_error == -EAGAIN) {
593 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
594 		} else {
595 			spin_lock(&dreq->lock);
596 			nfs_direct_truncate_request(dreq, req);
597 			spin_unlock(&dreq->lock);
598 			nfs_release_request(req);
599 		}
600 	}
601 
602 	if (put_dreq(dreq))
603 		nfs_direct_write_complete(dreq);
604 }
605 
606 static void nfs_direct_commit_complete(struct nfs_commit_data *data)
607 {
608 	const struct nfs_writeverf *verf = data->res.verf;
609 	struct nfs_direct_req *dreq = data->dreq;
610 	struct nfs_commit_info cinfo;
611 	struct nfs_page *req;
612 	int status = data->task.tk_status;
613 
614 	trace_nfs_direct_commit_complete(dreq);
615 
616 	spin_lock(&dreq->lock);
617 	if (status < 0) {
618 		/* Errors in commit are fatal */
619 		dreq->error = status;
620 		dreq->flags = NFS_ODIRECT_DONE;
621 	} else {
622 		status = dreq->error;
623 	}
624 	spin_unlock(&dreq->lock);
625 
626 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
627 
628 	while (!list_empty(&data->pages)) {
629 		req = nfs_list_entry(data->pages.next);
630 		nfs_list_remove_request(req);
631 		if (status < 0) {
632 			spin_lock(&dreq->lock);
633 			nfs_direct_truncate_request(dreq, req);
634 			spin_unlock(&dreq->lock);
635 			nfs_release_request(req);
636 		} else if (!nfs_write_match_verf(verf, req)) {
637 			spin_lock(&dreq->lock);
638 			if (dreq->flags == 0)
639 				dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
640 			spin_unlock(&dreq->lock);
641 			/*
642  * Despite the apparent server reboot, the write itself was
643  * successful, so reset wb_nio.
644 			 */
645 			req->wb_nio = 0;
646 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
647 		} else
648 			nfs_release_request(req);
649 		nfs_unlock_and_release_request(req);
650 	}
651 
652 	if (nfs_commit_end(cinfo.mds))
653 		nfs_direct_write_complete(dreq);
654 }
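
/*
 * The verifier comparison above follows the usual unstable-write rules: the
 * server returns a write verifier with each UNSTABLE write and with each
 * COMMIT reply, and a mismatch (nfs_write_match_verf() failing) means the
 * server may have rebooted and lost the uncommitted data.  Such requests go
 * back on the commit list and the dreq is flagged NFS_ODIRECT_RESCHED_WRITES
 * so that nfs_direct_write_reschedule() resends them.
 */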
655 
656 static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
657 		struct nfs_page *req)
658 {
659 	struct nfs_direct_req *dreq = cinfo->dreq;
660 
661 	trace_nfs_direct_resched_write(dreq);
662 
663 	spin_lock(&dreq->lock);
664 	if (dreq->flags != NFS_ODIRECT_DONE)
665 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
666 	spin_unlock(&dreq->lock);
667 	nfs_mark_request_commit(req, NULL, cinfo, 0);
668 }
669 
670 static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
671 	.completion = nfs_direct_commit_complete,
672 	.resched_write = nfs_direct_resched_write,
673 };
674 
675 static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
676 {
677 	int res;
678 	struct nfs_commit_info cinfo;
679 	LIST_HEAD(mds_list);
680 
681 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
682 	nfs_commit_begin(cinfo.mds);
683 	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
684 	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
685 	if (res < 0) { /* res == -ENOMEM */
686 		spin_lock(&dreq->lock);
687 		if (dreq->flags == 0)
688 			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
689 		spin_unlock(&dreq->lock);
690 	}
691 	if (nfs_commit_end(cinfo.mds))
692 		nfs_direct_write_complete(dreq);
693 }
694 
695 static void nfs_direct_write_clear_reqs(struct nfs_direct_req *dreq)
696 {
697 	struct nfs_commit_info cinfo;
698 	struct nfs_page *req;
699 	LIST_HEAD(reqs);
700 
701 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
702 	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);
703 
704 	while (!list_empty(&reqs)) {
705 		req = nfs_list_entry(reqs.next);
706 		nfs_list_remove_request(req);
707 		nfs_direct_truncate_request(dreq, req);
708 		nfs_release_request(req);
709 		nfs_unlock_and_release_request(req);
710 	}
711 }
712 
713 static void nfs_direct_write_schedule_work(struct work_struct *work)
714 {
715 	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
716 	int flags = dreq->flags;
717 
718 	dreq->flags = 0;
719 	switch (flags) {
720 	case NFS_ODIRECT_DO_COMMIT:
721 		nfs_direct_commit_schedule(dreq);
722 		break;
723 	case NFS_ODIRECT_RESCHED_WRITES:
724 		nfs_direct_write_reschedule(dreq);
725 		break;
726 	default:
727 		nfs_direct_write_clear_reqs(dreq);
728 		nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
729 		nfs_direct_complete(dreq);
730 	}
731 }
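
/*
 * The deferred work above is a small state machine keyed on dreq->flags:
 * NFS_ODIRECT_DO_COMMIT issues a COMMIT for writes the server answered as
 * unstable, NFS_ODIRECT_RESCHED_WRITES resends queued requests (e.g. after
 * a verifier mismatch or -EAGAIN), and anything else, including
 * NFS_ODIRECT_DONE, tears down the remaining requests, invalidates the
 * affected page cache range, and completes the request.
 */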
732 
733 static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
734 {
735 	trace_nfs_direct_write_complete(dreq);
736 	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
737 }
738 
739 static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
740 {
741 	struct nfs_direct_req *dreq = hdr->dreq;
742 	struct nfs_commit_info cinfo;
743 	struct nfs_page *req = nfs_list_entry(hdr->pages.next);
744 	int flags = NFS_ODIRECT_DONE;
745 
746 	trace_nfs_direct_write_completion(dreq);
747 
748 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
749 
750 	spin_lock(&dreq->lock);
751 	if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) {
752 		spin_unlock(&dreq->lock);
753 		goto out_put;
754 	}
755 
756 	nfs_direct_count_bytes(dreq, hdr);
757 	if (test_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags) &&
758 	    !test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
759 		if (!dreq->flags)
760 			dreq->flags = NFS_ODIRECT_DO_COMMIT;
761 		flags = dreq->flags;
762 	}
763 	spin_unlock(&dreq->lock);
764 
765 	while (!list_empty(&hdr->pages)) {
766 
767 		req = nfs_list_entry(hdr->pages.next);
768 		nfs_list_remove_request(req);
769 		if (flags == NFS_ODIRECT_DO_COMMIT) {
770 			kref_get(&req->wb_kref);
771 			memcpy(&req->wb_verf, &hdr->verf.verifier,
772 			       sizeof(req->wb_verf));
773 			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
774 				hdr->ds_commit_idx);
775 		} else if (flags == NFS_ODIRECT_RESCHED_WRITES) {
776 			kref_get(&req->wb_kref);
777 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
778 		}
779 		nfs_unlock_and_release_request(req);
780 	}
781 
782 out_put:
783 	if (put_dreq(dreq))
784 		nfs_direct_write_complete(dreq);
785 	hdr->release(hdr);
786 }
787 
788 static void nfs_write_sync_pgio_error(struct list_head *head, int error)
789 {
790 	struct nfs_page *req;
791 
792 	while (!list_empty(head)) {
793 		req = nfs_list_entry(head->next);
794 		nfs_list_remove_request(req);
795 		nfs_unlock_and_release_request(req);
796 	}
797 }
798 
799 static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
800 {
801 	struct nfs_direct_req *dreq = hdr->dreq;
802 	struct nfs_page *req;
803 	struct nfs_commit_info cinfo;
804 
805 	trace_nfs_direct_write_reschedule_io(dreq);
806 
807 	nfs_init_cinfo_from_dreq(&cinfo, dreq);
808 	spin_lock(&dreq->lock);
809 	if (dreq->error == 0)
810 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
811 	set_bit(NFS_IOHDR_REDO, &hdr->flags);
812 	spin_unlock(&dreq->lock);
813 	while (!list_empty(&hdr->pages)) {
814 		req = nfs_list_entry(hdr->pages.next);
815 		nfs_list_remove_request(req);
816 		nfs_unlock_request(req);
817 		nfs_mark_request_commit(req, NULL, &cinfo, 0);
818 	}
819 }
820 
821 static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
822 	.error_cleanup = nfs_write_sync_pgio_error,
823 	.init_hdr = nfs_direct_pgio_init,
824 	.completion = nfs_direct_write_completion,
825 	.reschedule_io = nfs_direct_write_reschedule_io,
826 };
827 
828 
829 /*
830  * NB: Return the value of the first error return code.  Subsequent
831  *     errors after the first one are ignored.
832  */
833 /*
834  * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
835  * operation.  If iov_iter_get_pages_alloc2() or nfs_page_create_from_page()
836  * fails, bail and stop sending more writes.  Write length accounting is
837  * handled automatically by nfs_direct_write_completion().  If no requests
838  * have been sent at all, just return an error.
839  */
840 static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
841 					       struct iov_iter *iter,
842 					       loff_t pos, int ioflags)
843 {
844 	struct nfs_pageio_descriptor desc;
845 	struct inode *inode = dreq->inode;
846 	struct nfs_commit_info cinfo;
847 	ssize_t result = 0;
848 	size_t requested_bytes = 0;
849 	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);
850 	bool defer = false;
851 
852 	trace_nfs_direct_write_schedule_iovec(dreq);
853 
854 	nfs_pageio_init_write(&desc, inode, ioflags, false,
855 			      &nfs_direct_write_completion_ops);
856 	desc.pg_dreq = dreq;
857 	get_dreq(dreq);
858 	inode_dio_begin(inode);
859 
860 	NFS_I(inode)->write_io += iov_iter_count(iter);
861 	while (iov_iter_count(iter)) {
862 		struct page **pagevec;
863 		size_t bytes;
864 		size_t pgbase;
865 		unsigned npages, i;
866 
867 		result = iov_iter_get_pages_alloc2(iter, &pagevec,
868 						  wsize, &pgbase);
869 		if (result < 0)
870 			break;
871 
872 		bytes = result;
873 		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
874 		for (i = 0; i < npages; i++) {
875 			struct nfs_page *req;
876 			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
877 
878 			req = nfs_page_create_from_page(dreq->ctx, pagevec[i],
879 							pgbase, pos, req_len);
880 			if (IS_ERR(req)) {
881 				result = PTR_ERR(req);
882 				break;
883 			}
884 
885 			if (desc.pg_error < 0) {
886 				nfs_free_request(req);
887 				result = desc.pg_error;
888 				break;
889 			}
890 
891 			pgbase = 0;
892 			bytes -= req_len;
893 			requested_bytes += req_len;
894 			pos += req_len;
895 
896 			if (defer) {
897 				nfs_mark_request_commit(req, NULL, &cinfo, 0);
898 				continue;
899 			}
900 
901 			nfs_lock_request(req);
902 			if (nfs_pageio_add_request(&desc, req))
903 				continue;
904 
905 			/* Exit on hard errors */
906 			if (desc.pg_error < 0 && desc.pg_error != -EAGAIN) {
907 				result = desc.pg_error;
908 				nfs_unlock_and_release_request(req);
909 				break;
910 			}
911 
912 			/* If the error is soft, defer remaining requests */
913 			nfs_init_cinfo_from_dreq(&cinfo, dreq);
914 			spin_lock(&dreq->lock);
915 			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
916 			spin_unlock(&dreq->lock);
917 			nfs_unlock_request(req);
918 			nfs_mark_request_commit(req, NULL, &cinfo, 0);
919 			desc.pg_error = 0;
920 			defer = true;
921 		}
922 		nfs_direct_release_pages(pagevec, npages);
923 		kvfree(pagevec);
924 		if (result < 0)
925 			break;
926 	}
927 	nfs_pageio_complete(&desc);
928 
929 	/*
930 	 * If no bytes were started, return the error, and let the
931 	 * generic layer handle the completion.
932 	 */
933 	if (requested_bytes == 0) {
934 		inode_dio_end(inode);
935 		nfs_direct_req_release(dreq);
936 		return result < 0 ? result : -EIO;
937 	}
938 
939 	if (put_dreq(dreq))
940 		nfs_direct_write_complete(dreq);
941 	return requested_bytes;
942 }
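
/*
 * Note on the deferral logic above: once nfs_pageio_add_request() fails with
 * a soft error (-EAGAIN), the remaining requests are no longer sent directly.
 * They are instead marked for commit, the dreq is flagged
 * NFS_ODIRECT_RESCHED_WRITES, and nfs_direct_write_reschedule() picks them
 * up again once the outstanding I/O completes.
 */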
943 
944 /**
945  * nfs_file_direct_write - file direct write operation for NFS files
946  * @iocb: target I/O control block
947  * @iter: vector of user buffers from which to write data
948  * @swap: flag indicating this is swap IO, not O_DIRECT IO
949  *
950  * We use this function for direct writes instead of calling
951  * generic_file_write_iter() in order to avoid taking the inode
952  * semaphore and updating the i_size.  The NFS server will set
953  * the new i_size and this client must read the updated size
954  * back into its cache.  We let the server do generic write
955  * parameter checking and report problems.
956  *
957  * We eliminate local atime updates, see direct read above.
958  *
959  * We avoid unnecessary page cache invalidations for normal cached
960  * readers of this file.
961  *
962  * Note that O_APPEND is not supported for NFS direct writes, as there
963  * is no atomic O_APPEND write facility in the NFS protocol.
964  */
965 ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter,
966 			      bool swap)
967 {
968 	ssize_t result, requested;
969 	size_t count;
970 	struct file *file = iocb->ki_filp;
971 	struct address_space *mapping = file->f_mapping;
972 	struct inode *inode = mapping->host;
973 	struct nfs_direct_req *dreq;
974 	struct nfs_lock_context *l_ctx;
975 	loff_t pos, end;
976 
977 	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
978 		file, iov_iter_count(iter), (long long) iocb->ki_pos);
979 
980 	if (swap)
981 		/* bypass generic checks */
982 		result = iov_iter_count(iter);
983 	else
984 		result = generic_write_checks(iocb, iter);
985 	if (result <= 0)
986 		return result;
987 	count = result;
988 	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);
989 
990 	pos = iocb->ki_pos;
991 	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;
992 
993 	task_io_account_write(count);
994 
995 	result = -ENOMEM;
996 	dreq = nfs_direct_req_alloc();
997 	if (!dreq)
998 		goto out;
999 
1000 	dreq->inode = inode;
1001 	dreq->max_count = count;
1002 	dreq->io_start = pos;
1003 	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
1004 	l_ctx = nfs_get_lock_context(dreq->ctx);
1005 	if (IS_ERR(l_ctx)) {
1006 		result = PTR_ERR(l_ctx);
1007 		nfs_direct_req_release(dreq);
1008 		goto out_release;
1009 	}
1010 	dreq->l_ctx = l_ctx;
1011 	if (!is_sync_kiocb(iocb))
1012 		dreq->iocb = iocb;
1013 	pnfs_init_ds_commit_info_ops(&dreq->ds_cinfo, inode);
1014 
1015 	if (swap) {
1016 		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
1017 							    FLUSH_STABLE);
1018 	} else {
1019 		result = nfs_start_io_direct(inode);
1020 		if (result) {
1021 			/* release the reference that would usually be
1022 			 * consumed by nfs_direct_write_schedule_iovec()
1023 			 */
1024 			nfs_direct_req_release(dreq);
1025 			goto out_release;
1026 		}
1027 
1028 		requested = nfs_direct_write_schedule_iovec(dreq, iter, pos,
1029 							    FLUSH_COND_STABLE);
1030 
1031 		if (mapping->nrpages) {
1032 			invalidate_inode_pages2_range(mapping,
1033 						      pos >> PAGE_SHIFT, end);
1034 		}
1035 
1036 		nfs_end_io_direct(inode);
1037 	}
1038 
1039 	if (requested > 0) {
1040 		result = nfs_direct_wait(dreq);
1041 		if (result > 0) {
1042 			requested -= result;
1043 			iocb->ki_pos = pos + result;
1044 			/* XXX: should check the generic_write_sync retval */
1045 			generic_write_sync(iocb, result);
1046 		}
1047 		iov_iter_revert(iter, requested);
1048 	} else {
1049 		result = requested;
1050 	}
1051 	nfs_fscache_invalidate(inode, FSCACHE_INVAL_DIO_WRITE);
1052 out_release:
1053 	nfs_direct_req_release(dreq);
1054 out:
1055 	return result;
1056 }
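
/*
 * Typical entry point: nfs_file_write() in fs/nfs/file.c routes O_DIRECT
 * writes here, along the lines of (a sketch, not quoted verbatim):
 *
 *	if (iocb->ki_flags & IOCB_DIRECT)
 *		return nfs_file_direct_write(iocb, from, false);
 */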
1057 
1058 /**
1059  * nfs_init_directcache - create a slab cache for nfs_direct_req structures
1060  *
1061  */
1062 int __init nfs_init_directcache(void)
1063 {
1064 	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
1065 						sizeof(struct nfs_direct_req),
1066 						0, SLAB_RECLAIM_ACCOUNT,
1067 						NULL);
1068 	if (nfs_direct_cachep == NULL)
1069 		return -ENOMEM;
1070 
1071 	return 0;
1072 }
1073 
1074 /**
1075  * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
1076  *
1077  */
1078 void nfs_destroy_directcache(void)
1079 {
1080 	kmem_cache_destroy(nfs_direct_cachep);
1081 }
1082