xref: /linux/fs/nfs/direct.c (revision 26b0d14106954ae46d2f4f7eec3481828a210f7d)
/*
 * linux/fs/nfs/direct.c
 *
 * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
 *
 * High-performance uncached I/O for the Linux NFS client
 *
 * There are important applications whose performance or correctness
 * depends on uncached access to file data.  Database clusters
 * (multiple copies of the same instance running on separate hosts)
 * implement their own cache coherency protocol that subsumes file
 * system cache protocols.  Applications that process datasets
 * considerably larger than the client's memory do not always benefit
 * from a local cache.  A streaming video server, for instance, has no
 * need to cache the contents of a file.
 *
 * When an application requests uncached I/O, all read and write requests
 * are made directly to the server; data stored or fetched via these
 * requests is not cached in the Linux page cache.  The client does not
 * correct unaligned requests from applications.  All requested bytes are
 * held on permanent storage before a direct write system call returns to
 * an application.
 *
 * Solaris implements an uncached I/O facility called directio() that
 * is used for backups and sequential I/O to very large files.  Solaris
 * also supports uncaching whole NFS partitions with "-o forcedirectio,"
 * an undocumented mount option.
 *
 * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
 * help from Andrew Morton.
 *
 * 18 Dec 2001	Initial implementation for 2.4  --cel
 * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
 * 08 Jun 2003	Port to 2.5 APIs  --cel
 * 31 Mar 2004	Handle direct I/O without VFS support  --cel
 * 15 Sep 2004	Parallel async reads  --cel
 * 04 May 2005	support O_DIRECT with aio  --cel
 *
 */

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/kref.h>
#include <linux/slab.h>
#include <linux/task_io_accounting_ops.h>

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/sunrpc/clnt.h>

#include <asm/uaccess.h>
#include <linux/atomic.h>

#include "internal.h"
#include "iostat.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_VFS

static struct kmem_cache *nfs_direct_cachep;

/*
 * This represents a set of asynchronous requests that we're waiting on
 */
struct nfs_direct_req {
	struct kref		kref;		/* release manager */

	/* I/O parameters */
	struct nfs_open_context	*ctx;		/* file open context info */
	struct nfs_lock_context *l_ctx;		/* Lock context info */
	struct kiocb *		iocb;		/* controlling i/o request */
	struct inode *		inode;		/* target file of i/o */

	/* completion state */
	atomic_t		io_count;	/* i/os we're waiting for */
	spinlock_t		lock;		/* protect completion state */
	ssize_t			count,		/* bytes actually processed */
				error;		/* any reported error */
	struct completion	completion;	/* wait for i/o completion */

	/* commit state */
	struct nfs_mds_commit_info mds_cinfo;	/* Storage for cinfo */
	struct pnfs_ds_commit_info ds_cinfo;	/* Storage for cinfo */
	struct work_struct	work;
	int			flags;
#define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
#define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
	struct nfs_writeverf	verf;		/* unstable write verifier */
};
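
/*
 * Rough lifecycle of the two flag values above, as implemented by
 * nfs_direct_write_completion() and nfs_direct_commit_complete()
 * below (a summary, not new behaviour):
 *
 *	WRITE reply is UNSTABLE, no flag set
 *		-> save the verifier, set NFS_ODIRECT_DO_COMMIT
 *	WRITE reply is UNSTABLE, verifier differs from the saved one
 *		-> set NFS_ODIRECT_RESCHED_WRITES
 *	COMMIT fails or its verifier does not match
 *		-> set NFS_ODIRECT_RESCHED_WRITES
 *	last reference dropped
 *		-> nfs_direct_write_schedule_work() commits, resends,
 *		   or completes according to the flag
 */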

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
static void nfs_direct_write_schedule_work(struct work_struct *work);

static inline void get_dreq(struct nfs_direct_req *dreq)
{
	atomic_inc(&dreq->io_count);
}

static inline int put_dreq(struct nfs_direct_req *dreq)
{
	return atomic_dec_and_test(&dreq->io_count);
}
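
/*
 * A sketch of how the two helpers above are used (the real call sites
 * are nfs_direct_{read,write}_schedule_iovec and the completion
 * handlers below); "dispatch_chunks" is a hypothetical stand-in:
 *
 *	get_dreq(dreq);			   (one reference for the scheduler)
 *	dispatch_chunks(dreq);		   (each i/o takes its own reference)
 *	if (put_dreq(dreq))		   (drop the scheduler's reference)
 *		nfs_direct_complete(dreq); (we were last; complete now)
 */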

/**
 * nfs_direct_IO - NFS address space operation for direct I/O
 * @rw: direction (read or write)
 * @iocb: target I/O control block
 * @iov: array of vectors that define I/O buffer
 * @pos: offset in file to begin the operation
 * @nr_segs: size of iovec array
 *
 * The presence of this routine in the address space ops vector means
 * the NFS client supports direct I/O.  However, we shunt off direct
 * read and write requests before the VFS gets them, so this method
 * should never be called.
 */
ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
{
	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
			iocb->ki_filp->f_path.dentry->d_name.name,
			(long long) pos, nr_segs);

	return -EINVAL;
}

static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
{
	unsigned int i;
	for (i = 0; i < npages; i++)
		page_cache_release(pages[i]);
}

void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
			      struct nfs_direct_req *dreq)
{
	cinfo->lock = &dreq->lock;
	cinfo->mds = &dreq->mds_cinfo;
	cinfo->ds = &dreq->ds_cinfo;
	cinfo->dreq = dreq;
	cinfo->completion_ops = &nfs_direct_commit_completion_ops;
}

static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
{
	struct nfs_direct_req *dreq;

	dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
	if (!dreq)
		return NULL;

	kref_init(&dreq->kref);
	kref_get(&dreq->kref);
	init_completion(&dreq->completion);
	INIT_LIST_HEAD(&dreq->mds_cinfo.list);
	INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
	spin_lock_init(&dreq->lock);

	return dreq;
}

static void nfs_direct_req_free(struct kref *kref)
{
	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);

	if (dreq->l_ctx != NULL)
		nfs_put_lock_context(dreq->l_ctx);
	if (dreq->ctx != NULL)
		put_nfs_open_context(dreq->ctx);
	kmem_cache_free(nfs_direct_cachep, dreq);
}

static void nfs_direct_req_release(struct nfs_direct_req *dreq)
{
	kref_put(&dreq->kref, nfs_direct_req_free);
}

/*
 * Collects and returns the final error value/byte-count.
 */
static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
{
	ssize_t result = -EIOCBQUEUED;

	/* Async requests don't wait here */
	if (dreq->iocb)
		goto out;

	result = wait_for_completion_killable(&dreq->completion);

	if (!result)
		result = dreq->error;
	if (!result)
		result = dreq->count;

out:
	return (ssize_t) result;
}

/*
 * Synchronous I/O uses a stack-allocated iocb.  Thus we can't trust
 * the iocb is still valid here if this is a synchronous request.
 */
static void nfs_direct_complete(struct nfs_direct_req *dreq)
{
	if (dreq->iocb) {
		long res = (long) dreq->error;
		if (!res)
			res = (long) dreq->count;
		aio_complete(dreq->iocb, res, 0);
	}
	complete_all(&dreq->completion);

	nfs_direct_req_release(dreq);
}

static void nfs_direct_readpage_release(struct nfs_page *req)
{
	dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
		req->wb_context->dentry->d_inode->i_sb->s_id,
		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
		req->wb_bytes,
		(long long)req_offset(req));
	nfs_release_request(req);
}

static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
{
	unsigned long bytes = 0;
	struct nfs_direct_req *dreq = hdr->dreq;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	spin_lock(&dreq->lock);
	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
		dreq->error = hdr->error;
	else
		dreq->count += hdr->good_bytes;
	spin_unlock(&dreq->lock);

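	/*
	 * Release each page, marking it dirty if data landed in it.  On
	 * a short read (EOF), first zero the tail beyond hdr->good_bytes
	 * so no stale data is left in the user's pages: for example,
	 * assuming 4096-byte pages and page-aligned requests,
	 * good_bytes == 6000 zeroes bytes 1904..4095 of the second page
	 * and the whole of every page after it.
	 */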
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);
		struct page *page = req->wb_page;

		if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
			if (bytes > hdr->good_bytes)
				zero_user(page, 0, PAGE_SIZE);
			else if (hdr->good_bytes - bytes < PAGE_SIZE)
				zero_user_segment(page,
					hdr->good_bytes & ~PAGE_MASK,
					PAGE_SIZE);
		}
		if (!PageCompound(page)) {
			if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
				if (bytes < hdr->good_bytes)
					set_page_dirty(page);
			} else
				set_page_dirty(page);
		}
		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		nfs_direct_readpage_release(req);
	}
out_put:
	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	hdr->release(hdr);
}

static void nfs_read_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_release_request(req);
	}
}

static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
{
	get_dreq(hdr->dreq);
}

static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
	.error_cleanup = nfs_read_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_read_completion,
};

/*
 * For each rsize'd chunk of the user's buffer, dispatch an NFS READ
 * operation.  If get_user_pages() fails, or the page vector or an
 * nfs_page request cannot be allocated, bail and stop sending more
 * reads.  Read length accounting is handled automatically by
 * nfs_direct_read_completion().  Otherwise, if no requests have been
 * sent, just return an error.
 */
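/*
 * Worked example of the chunking arithmetic below (assuming 4096-byte
 * pages): with rsize == 32768 and a user buffer that begins 512 bytes
 * into a page, the first chunk is min(max(rsize, PAGE_SIZE), count)
 * bytes -- 32768 when count is large enough -- and it touches
 * nfs_page_array_len(512, 32768) == (512 + 32768 + 4095) / 4096 == 9
 * pages, the first used from offset 512 and the last only partially.
 */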
static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
						const struct iovec *iov,
						loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t rsize = NFS_SERVER(inode)->rsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *),
					  GFP_KERNEL);
		if (!pagevec)
			break;
		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 1, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;
		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
			/* XXX do we need to do the eof zeroing found in async_filler? */
			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
					      const struct iovec *iov,
					      unsigned long nr_segs,
					      loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	ssize_t result = -EINVAL;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_read(&desc, dreq->inode,
			     &nfs_direct_read_completion_ops);
	get_dreq(dreq);
	desc.pg_dreq = dreq;

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_read_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}

	nfs_pageio_complete(&desc);

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_complete(dreq);
	return 0;
}

static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
			       unsigned long nr_segs, loff_t pos)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (dreq == NULL)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
	/* don't let a negative error value skew the read_io byte counter */
	if (result > 0)
		NFS_I(inode)->read_io += result;
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}


static void nfs_inode_dio_write_done(struct inode *inode)
{
	nfs_zap_mapping(inode, inode->i_mapping);
	inode_dio_done(inode);
}

#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor desc;
	struct nfs_page *req, *tmp;
	LIST_HEAD(reqs);
	struct nfs_commit_info cinfo;
	LIST_HEAD(failed);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
	spin_lock(cinfo.lock);
	nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
	spin_unlock(cinfo.lock);

	dreq->count = 0;
	get_dreq(dreq);

	nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;

	list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
		if (!nfs_pageio_add_request(&desc, req)) {
			nfs_list_remove_request(req);
			nfs_list_add_request(req, &failed);
			spin_lock(cinfo.lock);
			dreq->flags = 0;
			dreq->error = -EIO;
			spin_unlock(cinfo.lock);
		}
		nfs_release_request(req);
	}
	nfs_pageio_complete(&desc);

	while (!list_empty(&failed)) {
		req = nfs_list_entry(failed.next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
}

static void nfs_direct_commit_complete(struct nfs_commit_data *data)
{
	struct nfs_direct_req *dreq = data->dreq;
	struct nfs_commit_info cinfo;
	struct nfs_page *req;
	int status = data->task.tk_status;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	if (status < 0) {
		dprintk("NFS: %5u commit failed with error %d.\n",
			data->task.tk_pid, status);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	} else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
		dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
	}

	dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
			/* Note the rewrite will go through mds */
			nfs_mark_request_commit(req, NULL, &cinfo);
		} else
			nfs_release_request(req);
		nfs_unlock_and_release_request(req);
	}

	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_direct_write_complete(dreq, data->inode);
}

static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
{
	/* There is no lock to clear */
}

static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
	.completion = nfs_direct_commit_complete,
	.error_cleanup = nfs_direct_error_cleanup,
};

static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
{
	int res;
	struct nfs_commit_info cinfo;
	LIST_HEAD(mds_list);

	nfs_init_cinfo_from_dreq(&cinfo, dreq);
	nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
	res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
	if (res < 0) /* res == -ENOMEM */
		nfs_direct_write_reschedule(dreq);
}

static void nfs_direct_write_schedule_work(struct work_struct *work)
{
	struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
	int flags = dreq->flags;

	dreq->flags = 0;
	switch (flags) {
		case NFS_ODIRECT_DO_COMMIT:
			nfs_direct_commit_schedule(dreq);
			break;
		case NFS_ODIRECT_RESCHED_WRITES:
			nfs_direct_write_reschedule(dreq);
			break;
		default:
			nfs_inode_dio_write_done(dreq->inode);
			nfs_direct_complete(dreq);
	}
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
}

#else
static void nfs_direct_write_schedule_work(struct work_struct *work)
{
}

static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
{
	nfs_inode_dio_write_done(inode);
	nfs_direct_complete(dreq);
}
#endif

/*
 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
 * operation.  If get_user_pages() fails, or the page vector or an
 * nfs_page request cannot be allocated, bail and stop sending more
 * writes.  Write length accounting is handled automatically by
 * nfs_direct_write_completion().  Otherwise, if no requests have been
 * sent, just return an error.
 *
 * NB: Return the value of the first error return code.  Subsequent
 *     errors after the first one are ignored.
 */
static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
						 const struct iovec *iov,
						 loff_t pos)
{
	struct nfs_direct_req *dreq = desc->pg_dreq;
	struct nfs_open_context *ctx = dreq->ctx;
	struct inode *inode = ctx->dentry->d_inode;
	unsigned long user_addr = (unsigned long)iov->iov_base;
	size_t count = iov->iov_len;
	size_t wsize = NFS_SERVER(inode)->wsize;
	unsigned int pgbase;
	int result;
	ssize_t started = 0;
	struct page **pagevec = NULL;
	unsigned int npages;

	do {
		size_t bytes;
		int i;

		pgbase = user_addr & ~PAGE_MASK;
		bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);

		result = -ENOMEM;
		npages = nfs_page_array_len(pgbase, bytes);
		if (!pagevec)
			pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
		if (!pagevec)
			break;

		down_read(&current->mm->mmap_sem);
		result = get_user_pages(current, current->mm, user_addr,
					npages, 0, 0, pagevec, NULL);
		up_read(&current->mm->mmap_sem);
		if (result < 0)
			break;

		if ((unsigned)result < npages) {
			bytes = result * PAGE_SIZE;
			if (bytes <= pgbase) {
				nfs_direct_release_pages(pagevec, result);
				break;
			}
			bytes -= pgbase;
			npages = result;
		}

		for (i = 0; i < npages; i++) {
			struct nfs_page *req;
			unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);

			req = nfs_create_request(dreq->ctx, dreq->inode,
						 pagevec[i],
						 pgbase, req_len);
			if (IS_ERR(req)) {
				result = PTR_ERR(req);
				break;
			}
			nfs_lock_request(req);
			req->wb_index = pos >> PAGE_SHIFT;
			req->wb_offset = pos & ~PAGE_MASK;
			if (!nfs_pageio_add_request(desc, req)) {
				result = desc->pg_error;
				nfs_unlock_and_release_request(req);
				break;
			}
			pgbase = 0;
			bytes -= req_len;
			started += req_len;
			user_addr += req_len;
			pos += req_len;
			count -= req_len;
		}
		/* The nfs_page structures now hold references to these pages */
		nfs_direct_release_pages(pagevec, npages);
	} while (count != 0 && result >= 0);

	kfree(pagevec);

	if (started)
		return started;
	return result < 0 ? (ssize_t) result : -EFAULT;
}

static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_direct_req *dreq = hdr->dreq;
	struct nfs_commit_info cinfo;
	int bit = -1;
	struct nfs_page *req = nfs_list_entry(hdr->pages.next);

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out_put;

	nfs_init_cinfo_from_dreq(&cinfo, dreq);

	spin_lock(&dreq->lock);

	if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
		dreq->flags = 0;
		dreq->error = hdr->error;
	}
	if (dreq->error != 0)
		bit = NFS_IOHDR_ERROR;
	else {
		dreq->count += hdr->good_bytes;
		if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
			dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
			bit = NFS_IOHDR_NEED_RESCHED;
		} else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
			if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
				bit = NFS_IOHDR_NEED_RESCHED;
			else if (dreq->flags == 0) {
				memcpy(&dreq->verf, hdr->verf,
				       sizeof(dreq->verf));
				bit = NFS_IOHDR_NEED_COMMIT;
				dreq->flags = NFS_ODIRECT_DO_COMMIT;
			} else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
				if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
					bit = NFS_IOHDR_NEED_RESCHED;
				} else
					bit = NFS_IOHDR_NEED_COMMIT;
			}
		}
	}
	spin_unlock(&dreq->lock);

	while (!list_empty(&hdr->pages)) {
		req = nfs_list_entry(hdr->pages.next);
		nfs_list_remove_request(req);
		switch (bit) {
		case NFS_IOHDR_NEED_RESCHED:
		case NFS_IOHDR_NEED_COMMIT:
			kref_get(&req->wb_kref);
			nfs_mark_request_commit(req, hdr->lseg, &cinfo);
		}
		nfs_unlock_and_release_request(req);
	}

out_put:
	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, hdr->inode);
	hdr->release(hdr);
}

static void nfs_write_sync_pgio_error(struct list_head *head)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_unlock_and_release_request(req);
	}
}

static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
	.error_cleanup = nfs_write_sync_pgio_error,
	.init_hdr = nfs_direct_pgio_init,
	.completion = nfs_direct_write_completion,
};

static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
					       const struct iovec *iov,
					       unsigned long nr_segs,
					       loff_t pos)
{
	struct nfs_pageio_descriptor desc;
	struct inode *inode = dreq->inode;
	ssize_t result = 0;
	size_t requested_bytes = 0;
	unsigned long seg;

	nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
			      &nfs_direct_write_completion_ops);
	desc.pg_dreq = dreq;
	get_dreq(dreq);
	atomic_inc(&inode->i_dio_count);
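	/*
	 * The i_dio_count elevation above keeps truncate and similar
	 * operations from running while direct i/o is in flight; it is
	 * balanced by inode_dio_done(), either in the error path below
	 * or via nfs_inode_dio_write_done() once the write completes.
	 */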

	for (seg = 0; seg < nr_segs; seg++) {
		const struct iovec *vec = &iov[seg];
		result = nfs_direct_write_schedule_segment(&desc, vec, pos);
		if (result < 0)
			break;
		requested_bytes += result;
		if ((size_t)result < vec->iov_len)
			break;
		pos += vec->iov_len;
	}
	nfs_pageio_complete(&desc);
	NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;

	/*
	 * If no bytes were started, return the error, and let the
	 * generic layer handle the completion.
	 */
	if (requested_bytes == 0) {
		inode_dio_done(inode);
		nfs_direct_req_release(dreq);
		return result < 0 ? result : -EIO;
	}

	if (put_dreq(dreq))
		nfs_direct_write_complete(dreq, dreq->inode);
	return 0;
}

static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos,
				size_t count)
{
	ssize_t result = -ENOMEM;
	struct inode *inode = iocb->ki_filp->f_mapping->host;
	struct nfs_direct_req *dreq;

	dreq = nfs_direct_req_alloc();
	if (!dreq)
		goto out;

	dreq->inode = inode;
	dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
	dreq->l_ctx = nfs_get_lock_context(dreq->ctx);
	if (dreq->l_ctx == NULL)
		goto out_release;
	if (!is_sync_kiocb(iocb))
		dreq->iocb = iocb;

	result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
	if (!result)
		result = nfs_direct_wait(dreq);
out_release:
	nfs_direct_req_release(dreq);
out:
	return result;
}

/**
 * nfs_file_direct_read - file direct read operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers into which to read data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where reading starts
 *
 * We use this function for direct reads instead of calling
 * generic_file_aio_read() in order to avoid generic_file_aio_read()'s
 * check to see if the request starts before the end of the file.  For
 * that check to work, we must generate a GETATTR before each direct
 * read, and even then there is a window between the GETATTR and the
 * subsequent READ where the file size could change.  Our preference
 * is simply to do all reads the application wants, and the server
 * will take care of managing the end of file boundary.
 *
 * This function also avoids updating the file's atime locally; the
 * NFS server sets the file's atime, and this client must read the
 * updated atime from the server back into its cache.
 */
ssize_t nfs_file_direct_read(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTREADBYTES, count);

	dfprintk(FILE, "NFS: direct read(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_read(count);

	retval = nfs_direct_read(iocb, iov, nr_segs, pos);
	if (retval > 0)
		iocb->ki_pos = pos + retval;

out:
	return retval;
}

/**
 * nfs_file_direct_write - file direct write operation for NFS files
 * @iocb: target I/O control block
 * @iov: vector of user buffers from which to write data
 * @nr_segs: size of iov vector
 * @pos: byte offset in file where writing starts
 *
 * We use this function for direct writes instead of calling
 * generic_file_aio_write() in order to avoid taking the inode
 * semaphore and updating the i_size.  The NFS server will set
 * the new i_size and this client must read the updated size
 * back into its cache.  We let the server do generic write
 * parameter checking and report problems.
 *
 * We eliminate local atime updates, see direct read above.
 *
 * We avoid unnecessary page cache invalidations for normal cached
 * readers of this file.
 *
 * Note that O_APPEND is not supported for NFS direct writes, as there
 * is no atomic O_APPEND write facility in the NFS protocol.
 */
ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
				unsigned long nr_segs, loff_t pos)
{
	ssize_t retval = -EINVAL;
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count;

	count = iov_length(iov, nr_segs);
	nfs_add_stats(mapping->host, NFSIOS_DIRECTWRITTENBYTES, count);

	dfprintk(FILE, "NFS: direct write(%s/%s, %zd@%Ld)\n",
		file->f_path.dentry->d_parent->d_name.name,
		file->f_path.dentry->d_name.name,
		count, (long long) pos);

	retval = generic_write_checks(file, &pos, &count, 0);
	if (retval)
		goto out;

	retval = -EINVAL;
	if ((ssize_t) count < 0)
		goto out;
	retval = 0;
	if (!count)
		goto out;

	retval = nfs_sync_mapping(mapping);
	if (retval)
		goto out;

	task_io_account_write(count);

	retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
	if (retval > 0) {
		struct inode *inode = mapping->host;

		iocb->ki_pos = pos + retval;
		spin_lock(&inode->i_lock);
		if (i_size_read(inode) < iocb->ki_pos)
			i_size_write(inode, iocb->ki_pos);
		spin_unlock(&inode->i_lock);
	}
out:
	return retval;
}

/**
 * nfs_init_directcache - create a slab cache for nfs_direct_req structures
 *
 */
int __init nfs_init_directcache(void)
{
	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
						sizeof(struct nfs_direct_req),
						0, (SLAB_RECLAIM_ACCOUNT|
							SLAB_MEM_SPREAD),
						NULL);
	if (nfs_direct_cachep == NULL)
		return -ENOMEM;

	return 0;
}

/**
 * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
 *
 */
void nfs_destroy_directcache(void)
{
	kmem_cache_destroy(nfs_direct_cachep);
}