/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */
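
/*
 * For illustration only (not part of this file): a minimal userspace
 * sketch of the uncached access described above.  The path and sizes
 * are hypothetical; O_DIRECT callers are expected to align their own
 * buffers and offsets.
 *
 *	int fd = open("/mnt/nfs/data", O_RDWR | O_DIRECT);
 *	void *buf;
 *
 *	posix_memalign(&buf, 4096, 4096);
 *	pread(fd, buf, 4096, 0);	// goes straight to the server,
 *					// bypassing the page cache
 */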

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/module.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_mirror {
	ssize_t count;
};

struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */

	struct nfs_direct_mirror mirrors[NFS_PAGEIO_DESCRIPTOR_MIRROR_MAX];
	int			mirror_count;

	loff_t			io_start;	/* Start offset for I/O */
	ssize_t			count,		/* bytes actually processed */
				max_count,	/* max expected count */
				bytes_left,	/* bytes left to be sent */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
	/* for write */
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	/* for read */
#define NFS_ODIRECT_SHOULD_DIRTY	(3)	/* dirty user-space page after read */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}
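
/*
 * Illustrative reference pattern (summarizing the code, not adding to
 * it): each page I/O header takes a reference via get_dreq() from
 * nfs_direct_pgio_init(), and each completion drops one.  The
 * scheduling functions hold a reference of their own across the
 * dispatch loop so the dreq cannot complete while requests are still
 * being queued:
 *
 *	get_dreq(dreq);			// scheduler's reference
 *	... dispatch requests, each taking its own reference ...
 *	if (put_dreq(dreq))		// last reference gone: all I/O done
 *		nfs_direct_complete(dreq);
 */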

static void
nfs_direct_good_bytes(struct nfs_direct_req *dreq, struct nfs_pgio_header *hdr)
{
	int i;
	ssize_t count;

	WARN_ON_ONCE(dreq->count >= dreq->max_count);

	if (dreq->mirror_count == 1) {
		dreq->mirrors[hdr->pgio_mirror_idx].count += hdr->good_bytes;
		dreq->count += hdr->good_bytes;
	} else {
		/* mirrored writes */
		count = dreq->mirrors[hdr->pgio_mirror_idx].count;
		if (count + dreq->io_start < hdr->io_start + hdr->good_bytes) {
			count = hdr->io_start + hdr->good_bytes - dreq->io_start;
			dreq->mirrors[hdr->pgio_mirror_idx].count = count;
		}
		/*
		 * Update dreq->count by finding the minimum count agreed
		 * on by all mirrors.
		 */
		count = dreq->mirrors[0].count;

		for (i = 1; i < dreq->mirror_count; i++)
			count = min(count, dreq->mirrors[i].count);

		dreq->count = count;
	}
}
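
/*
 * Worked example with hypothetical values: with two mirrors, if mirror 0
 * has acknowledged bytes [io_start, io_start + 8192) while mirror 1 has
 * only acknowledged [io_start, io_start + 4096), the loop above leaves
 * dreq->count at 4096.  Only the range acknowledged by every mirror may
 * be reported back to the application as transferred.
 */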

/*
 * nfs_direct_select_verf - select the right verifier
 * @dreq: direct request possibly spanning multiple servers
 * @ds_clp: nfs_client of data server or NULL if MDS / non-pnfs
 * @commit_idx: commit bucket index for the DS
 *
 * Returns the correct verifier to use given the role of the server.
 */
static struct nfs_writeverf *
nfs_direct_select_verf(struct nfs_direct_req *dreq,
		       struct nfs_client *ds_clp,
		       int commit_idx)
{
	struct nfs_writeverf *verfp = &dreq->verf;

#ifdef CONFIG_NFS_V4_1
	/*
	 * If pNFS is in use, use the DS verf, except when
	 * commit_through_mds is set for the layout segment, in which
	 * case nbuckets is zero.
	 */
	if (ds_clp && dreq->ds_cinfo.nbuckets > 0) {
		if (commit_idx >= 0 && commit_idx < dreq->ds_cinfo.nbuckets)
			verfp = &dreq->ds_cinfo.buckets[commit_idx].direct_verf;
		else
			WARN_ON_ONCE(1);
	}
#endif
	return verfp;
}

/*
 * nfs_direct_set_hdr_verf - set the write/commit verifier
 * @dreq: direct request possibly spanning multiple servers
 * @hdr: pageio header to validate against previously seen verfs
 *
 * Set the server's (MDS or DS) "seen" verifier.
 */
static void nfs_direct_set_hdr_verf(struct nfs_direct_req *dreq,
				    struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
	WARN_ON_ONCE(verfp->committed >= 0);
	memcpy(verfp, &hdr->verf, sizeof(struct nfs_writeverf));
	WARN_ON_ONCE(verfp->committed < 0);
}

static int nfs_direct_cmp_verf(const struct nfs_writeverf *v1,
		const struct nfs_writeverf *v2)
{
	return nfs_write_verifier_cmp(&v1->verifier, &v2->verifier);
}

/*
 * nfs_direct_set_or_cmp_hdr_verf - set or compare verifier for pgio header
 * @dreq: direct request possibly spanning multiple servers
 * @hdr: pageio header to validate against previously seen verf
 *
 * Sets the server's "seen" verf if not yet initialized.
 * Returns the result of comparing @hdr->verf with the "seen" verf of
 * the server used by @hdr (DS or MDS).
 */
static int nfs_direct_set_or_cmp_hdr_verf(struct nfs_direct_req *dreq,
					  struct nfs_pgio_header *hdr)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, hdr->ds_clp, hdr->ds_commit_idx);
	if (verfp->committed < 0) {
		nfs_direct_set_hdr_verf(dreq, hdr);
		return 0;
	}
	return nfs_direct_cmp_verf(verfp, &hdr->verf);
}
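
/*
 * Illustrative sequence: the first unstable WRITE reply from a given
 * server stores that server's verifier via nfs_direct_set_hdr_verf()
 * and returns 0.  If a later reply carries a different verifier (for
 * example, the server rebooted and lost its write cache), the
 * comparison is nonzero and the write completion path switches the
 * dreq to NFS_ODIRECT_RESCHED_WRITES.
 */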

/*
 * nfs_direct_cmp_commit_data_verf - compare verifier for commit data
 * @dreq: direct request possibly spanning multiple servers
 * @data: commit data to validate against previously seen verf
 *
 * Returns the result of comparing @data->verf with the verf of the
 * server used by @data (DS or MDS).
 */
static int nfs_direct_cmp_commit_data_verf(struct nfs_direct_req *dreq,
					   struct nfs_commit_data *data)
{
	struct nfs_writeverf *verfp;

	verfp = nfs_direct_select_verf(dreq, data->ds_clp,
					 data->ds_commit_index);

	/* verifier not set so always fail */
	if (verfp->committed < 0)
		return 1;

	return nfs_direct_cmp_verf(verfp, &data->verf);
}

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @iocb: target I/O control block
 * @iter: I/O buffer
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, for most direct I/O,
 * we shunt off direct read and write requests before the VFS gets them,
 * so this method is only ever called for swap.
 */
ssize_t nfs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	/* only swapfiles are allowed to call nfs_direct_IO() */
	if (!IS_SWAPFILE(inode))
		return 0;

	VM_BUG_ON(iov_iter_count(iter) != PAGE_SIZE);

	if (iov_iter_rw(iter) == READ)
		return nfs_file_direct_read(iocb, iter);
	return nfs_file_direct_write(iocb, iter);
}
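
/*
 * Orientation note (assumed behaviour of the swap path, not defined in
 * this file): swap-over-NFS pushes pages to an NFS-backed swapfile one
 * PAGE_SIZE unit at a time, which is why the VM_BUG_ON() above insists
 * on single-page iterators.
 */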

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		put_page(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->inode = dreq->inode;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline void nfs_direct_setup_mirroring(struct nfs_direct_req *dreq,
					     struct nfs_pageio_descriptor *pgio,
					     struct nfs_page *req)
{
	int mirror_count = 1;

	if (pgio->pg_ops->pg_get_mirror_count)
		mirror_count = pgio->pg_ops->pg_get_mirror_count(pgio, req);

	dreq->mirror_count = mirror_count;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	dreq->verf.committed = NFS_INVALID_STABLE_HOW;	/* not set yet */
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	dreq->mirror_count = 1;
	spin_lock_init(&dreq->lock);

	return dreq;
}
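
/*
 * Note on the kref handling above: kref_init() sets the count to one
 * and kref_get() raises it to two.  One reference is dropped by the
 * I/O completion path (nfs_direct_complete() ->
 * nfs_direct_req_release()), the other by the issuing syscall once it
 * has finished waiting.
 */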

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

ssize_t nfs_dreq_bytes_left(struct nfs_direct_req *dreq)
{
	return dreq->bytes_left;
}
EXPORT_SYMBOL_GPL(nfs_dreq_bytes_left);

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result) {
		result = dreq->count;
		WARN_ON_ONCE(dreq->count < 0);
	}
	if (!result)
		result = dreq->error;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	struct inode *inode = dreq->inode;

	inode_dio_end(inode);

	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (dreq->count != 0) {
			res = (long) dreq->count;
			WARN_ON_ONCE(dreq->count < 0);
		}
		dreq->iocb->ki_complete(dreq->iocb, res, 0);
	}

	complete(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		nfs_direct_good_bytes(dreq, hdr);

	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (!PageCompound(page) && bytes < hdr->good_bytes &&
		    (dreq->flags == NFS_ODIRECT_SHOULD_DIRTY))
			set_page_dirty(page);
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more reads.  Read length accounting is
 * handled automatically by nfs_direct_read_completion().  Otherwise,
 * if no requests have been sent, just return an error.
 */
static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      struct iov_iter *iter,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	size_t rsize = max_t(size_t, NFS_SERVER(inode)->rsize, PAGE_SIZE);

	nfs_pageio_init_read(&desc, dreq->inode, false,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;
	inode_dio_begin(inode);

	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  rsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return requested_bytes;
}
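
/*
 * Chunking arithmetic, by example (hypothetical values): a 9000-byte
 * segment whose first byte lands 512 bytes into a 4096-byte page gives
 * npages = (9000 + 512 + 4095) / 4096 = 3, and the per-page request
 * lengths work out to 3584, 4096 and 1320 bytes.  pgbase is nonzero
 * only for the first page of each segment.
 */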

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers into which to read data
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid that function's check
 * that the request starts before the end of the file.  For that check
 * to work, we must generate a GETATTR before each direct read, and
 * even then there is a window between the GETATTR and the subsequent
 * READ where the file size could change.  Our preference is simply
 * to do all reads the application wants, and the server will take
 * care of managing the end of file boundary.
 *
 * This function also avoids an unnecessary local atime update: the
 * NFS server sets the file's atime, and this client must read the
 * updated atime from the server back into its cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	ssize_t result = -EINVAL, requested;
	size_t count = iov_iter_count(iter);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%pD2, %zd@%Ld)\n",
		file, count, (long long) iocb->ki_pos);

	result = 0;
	if (!count)
		goto out;

	task_io_account_read(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = iocb->ki_pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	if (iter_is_iovec(iter))
		dreq->flags = NFS_ODIRECT_SHOULD_DIRTY;

	nfs_start_io_direct(inode);

	NFS_I(inode)->read_io += count;
	requested = nfs_direct_read_schedule_iovec(dreq, iter, iocb->ki_pos);

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos += result;
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}

out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}
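
/*
 * Example of the iterator bookkeeping above (hypothetical numbers): if
 * 65536 bytes were scheduled (requested == 65536) but the server
 * returned only 40960 before hitting EOF, the iterator has already
 * been advanced by the full 65536, so it is reverted by
 * requested - result == 24576 to leave it positioned just past the
 * bytes actually read.
 */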

static void
nfs_direct_write_scan_commit_list(struct inode *inode,
				  struct list_head *list,
				  struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
#ifdef CONFIG_NFS_V4_1
	if (cinfo->ds != NULL && cinfo->ds->nwritten != 0)
		NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
#endif
	nfs_scan_commit_list(&cinfo->mds->list, list, cinfo, 0);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
}

static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);
	int i;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_direct_write_scan_commit_list(dreq->inode, &reqs, &cinfo);

	dreq->count = 0;
	dreq->verf.committed = NFS_INVALID_STABLE_HOW;
	nfs_clear_pnfs_ds_commit_verifiers(&dreq->ds_cinfo);
	for (i = 0; i < dreq->mirror_count; i++)
		dreq->mirrors[i].count = 0;
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	req = nfs_list_entry(reqs.next);
	nfs_direct_setup_mirroring(dreq, &desc, req);
	if (desc.pg_error < 0) {
		list_splice_init(&reqs, &failed);
		goto out_failed;
	}

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(&cinfo.inode->i_lock);
			dreq->flags = 0;
			if (desc.pg_error < 0)
				dreq->error = desc.pg_error;
			else
				dreq->error = -EIO;
			spin_unlock(&cinfo.inode->i_lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

out_failed:
	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0 || nfs_direct_cmp_commit_data_verf(dreq, data))
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note: the rewrite will go through the MDS */
			nfs_mark_request_commit(req, NULL, &cinfo, 0);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq);
}

static void nfs_direct_resched_write(struct nfs_commit_info *cinfo,
		struct nfs_page *req)
{
	struct nfs_direct_req *dreq = cinfo->dreq;

	spin_lock(&dreq->lock);
	dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	spin_unlock(&dreq->lock);
	nfs_mark_request_commit(req, NULL, cinfo, 0);
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.resched_write = nfs_direct_resched_write,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
			nfs_direct_complete(dreq);
	}
}
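
/*
 * Summary of the dispatch above:
 *
 *	flags == 0			all data was committed stably;
 *					zap the mapping and complete
 *	NFS_ODIRECT_DO_COMMIT		unstable replies agreed on one
 *					verifier; schedule a COMMIT
 *	NFS_ODIRECT_RESCHED_WRITES	a verifier mismatched or a commit
 *					failed; resend the writes
 */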

static void nfs_direct_write_complete(struct nfs_direct_req *dreq)
{
	queue_work(nfsiod_workqueue, &dreq->work); /* Calls nfs_direct_write_schedule_work */
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	bool request_commit = false;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags))
		dreq->error = hdr->error;
	if (dreq->error == 0) {
		nfs_direct_good_bytes(dreq, hdr);
		if (nfs_write_need_commit(hdr)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				request_commit = true;
			else if (dreq->flags == 0) {
				nfs_direct_set_hdr_verf(dreq, hdr);
				request_commit = true;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				request_commit = true;
				if (nfs_direct_set_or_cmp_hdr_verf(dreq, hdr))
					dreq->flags =
						NFS_ODIRECT_RESCHED_WRITES;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		if (request_commit) {
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->ds_commit_idx);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static void nfs_direct_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;

	spin_lock(&dreq->lock);
	if (dreq->error == 0) {
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
		/* fake unstable write to let common nfs resend pages */
		hdr->verf.committed = NFS_UNSTABLE;
		hdr->good_bytes = hdr->args.count;
	}
	spin_unlock(&dreq->lock);
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
	.reschedule_io = nfs_direct_write_reschedule_io,
};

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If iov_iter_get_pages_alloc() or nfs_create_request()
 * fails, bail and stop sending more writes.  Write length accounting
 * is handled automatically by nfs_direct_write_completion().
 * Otherwise, if no requests have been sent, just return an error.
 *
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       struct iov_iter *iter,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	size_t wsize = max_t(size_t, NFS_SERVER(inode)->wsize, PAGE_SIZE);

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE, false,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	inode_dio_begin(inode);

	NFS_I(inode)->write_io += iov_iter_count(iter);
	while (iov_iter_count(iter)) {
		struct page **pagevec;
		size_t bytes;
		size_t pgbase;
		unsigned npages, i;

		result = iov_iter_get_pages_alloc(iter, &pagevec,
						  wsize, &pgbase);
		if (result < 0)
			break;

		bytes = result;
		iov_iter_advance(iter, bytes);
		npages = (result + pgbase + PAGE_SIZE - 1) / PAGE_SIZE;
		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, pagevec[i], NULL,
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}

			nfs_direct_setup_mirroring(dreq, &desc, req);
			if (desc.pg_error < 0) {
				nfs_free_request(req);
				result = desc.pg_error;
				break;
			}

			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(&desc, req)) {
				result = desc.pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			requested_bytes += req_len;
			pos += req_len;
			dreq->bytes_left -= req_len;
		}
		nfs_direct_release_pages(pagevec, npages);
		kvfree(pagevec);
		if (result < 0)
			break;
	}
	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_end(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq);
	return requested_bytes;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iter: vector of user buffers from which to write data
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We avoid local atime updates; see the direct read discussion above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, struct iov_iter *iter)
{
	ssize_t result = -EINVAL, requested;
	size_t count;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	struct nfs_direct_req *dreq;
	struct nfs_lock_context *l_ctx;
	loff_t pos, end;

	dfprintk(FILE, "NFS: direct write(%pD2, %zd@%Ld)\n",
		file, iov_iter_count(iter), (long long) iocb->ki_pos);

	result = generic_write_checks(iocb, iter);
	if (result <= 0)
		return result;
	count = result;
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	pos = iocb->ki_pos;
	end = (pos + iov_iter_count(iter) - 1) >> PAGE_SHIFT;

	task_io_account_write(count);

	result = -ENOMEM;
	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->bytes_left = dreq->max_count = count;
	dreq->io_start = pos;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	l_ctx = nfs_get_lock_context(dreq->ctx);
	if (IS_ERR(l_ctx)) {
		result = PTR_ERR(l_ctx);
		goto out_release;
	}
	dreq->l_ctx = l_ctx;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	nfs_start_io_direct(inode);

	requested = nfs_direct_write_schedule_iovec(dreq, iter, pos);

	if (mapping->nrpages) {
		invalidate_inode_pages2_range(mapping,
					      pos >> PAGE_SHIFT, end);
	}

	nfs_end_io_direct(inode);

	if (requested > 0) {
		result = nfs_direct_wait(dreq);
		if (result > 0) {
			requested -= result;
			iocb->ki_pos = pos + result;
			/* XXX: should check the generic_write_sync retval */
			generic_write_sync(iocb, result);
		}
		iov_iter_revert(iter, requested);
	} else {
		result = requested;
	}
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}