xref: /linux/fs/nfs/direct.c (revision f3d9478b2ce468c3115b02ecae7e975990697f15)
1 /*
2  * linux/fs/nfs/direct.c
3  *
4  * Copyright (C) 2003 by Chuck Lever <cel@netapp.com>
5  *
6  * High-performance uncached I/O for the Linux NFS client
7  *
8  * There are important applications whose performance or correctness
9  * depends on uncached access to file data.  Database clusters
10  * (multiple copies of the same instance running on separate hosts)
11  * implement their own cache coherency protocol that subsumes file
12  * system cache protocols.  Applications that process datasets
13  * considerably larger than the client's memory do not always benefit
14  * from a local cache.  A streaming video server, for instance, has no
15  * need to cache the contents of a file.
16  *
17  * When an application requests uncached I/O, all read and write requests
18  * are made directly to the server; data stored or fetched via these
19  * requests is not cached in the Linux page cache.  The client does not
20  * correct unaligned requests from applications.  All requested bytes are
21  * held on permanent storage before a direct write system call returns to
22  * an application.
23  *
24  * Solaris implements an uncached I/O facility called directio() that
25  * is used for backups and sequential I/O to very large files.  Solaris
26  * also supports uncaching whole NFS partitions with "-o forcedirectio,"
27  * an undocumented mount option.
28  *
29  * Designed by Jeff Kimmel, Chuck Lever, and Trond Myklebust, with
30  * help from Andrew Morton.
31  *
32  * 18 Dec 2001	Initial implementation for 2.4  --cel
33  * 08 Jul 2002	Version for 2.4.19, with bug fixes --trondmy
34  * 08 Jun 2003	Port to 2.5 APIs  --cel
35  * 31 Mar 2004	Handle direct I/O without VFS support  --cel
36  * 15 Sep 2004	Parallel async reads  --cel
37  * 04 May 2005	Support O_DIRECT with aio  --cel
38  *
39  */
40 
41 #include <linux/config.h>
42 #include <linux/errno.h>
43 #include <linux/sched.h>
44 #include <linux/kernel.h>
45 #include <linux/smp_lock.h>
46 #include <linux/file.h>
47 #include <linux/pagemap.h>
48 #include <linux/kref.h>
49 
50 #include <linux/nfs_fs.h>
51 #include <linux/nfs_page.h>
52 #include <linux/sunrpc/clnt.h>
53 
54 #include <asm/system.h>
55 #include <asm/uaccess.h>
56 #include <asm/atomic.h>
57 
58 #include "iostat.h"
59 
60 #define NFSDBG_FACILITY		NFSDBG_VFS
61 
62 static kmem_cache_t *nfs_direct_cachep;
63 
64 /*
65  * This represents a set of asynchronous requests that we're waiting on
66  */
67 struct nfs_direct_req {
68 	struct kref		kref;		/* release manager */
69 
70 	/* I/O parameters */
71 	struct list_head	list,		/* nfs_read/write_data structs */
72 				rewrite_list;	/* saved nfs_write_data structs */
73 	struct nfs_open_context	*ctx;		/* file open context info */
74 	struct kiocb *		iocb;		/* controlling i/o request */
75 	struct inode *		inode;		/* target file of i/o */
76 	unsigned long		user_addr;	/* location of user's buffer */
77 	size_t			user_count;	/* total bytes to move */
78 	loff_t			pos;		/* starting offset in file */
79 	struct page **		pages;		/* pages in our buffer */
80 	unsigned int		npages;		/* count of pages */
81 
82 	/* completion state */
83 	spinlock_t		lock;		/* protect completion state */
84 	int			outstanding;	/* i/os we're waiting for */
85 	ssize_t			count,		/* bytes actually processed */
86 				error;		/* any reported error */
87 	struct completion	completion;	/* wait for i/o completion */
88 
89 	/* commit state */
90 	struct nfs_write_data *	commit_data;	/* special write_data for commits */
91 	int			flags;
92 #define NFS_ODIRECT_DO_COMMIT		(1)	/* an unstable reply was received */
93 #define NFS_ODIRECT_RESCHED_WRITES	(2)	/* write verification failed */
94 	struct nfs_writeverf	verf;		/* unstable write verifier */
95 };
96 
97 static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync);
98 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
99 
100 /**
101  * nfs_direct_IO - NFS address space operation for direct I/O
102  * @rw: direction (read or write)
103  * @iocb: target I/O control block
104  * @iov: array of vectors that define I/O buffer
105  * @pos: offset in file to begin the operation
106  * @nr_segs: size of iovec array
107  *
108  * The presence of this routine in the address space ops vector means
109  * the NFS client supports direct I/O.  However, we shunt off direct
110  * read and write requests before the VFS gets them, so this method
111  * should never be called.
112  */
113 ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_t pos, unsigned long nr_segs)
114 {
115 	dprintk("NFS: nfs_direct_IO (%s) off/no(%Ld/%lu) EINVAL\n",
116 			iocb->ki_filp->f_dentry->d_name.name,
117 			(long long) pos, nr_segs);
118 
119 	return -EINVAL;
120 }
121 
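/*
 * Release the page references taken by nfs_get_user_pages().  When data
 * was read into the pages, mark them dirty first (compound pages are
 * skipped), then free the page array itself.
 */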
122 static void nfs_free_user_pages(struct page **pages, int npages, int do_dirty)
123 {
124 	int i;
125 	for (i = 0; i < npages; i++) {
126 		struct page *page = pages[i];
127 		if (do_dirty && !PageCompound(page))
128 			set_page_dirty_lock(page);
129 		page_cache_release(page);
130 	}
131 	kfree(pages);
132 }
133 
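/*
 * Pin the pages that back the user's buffer so the RPC layer can address
 * them directly.  Returns the number of pages pinned, or a negative errno:
 * -ENOMEM if the page array cannot be allocated, -EFAULT if the buffer
 * runs off the end of a mapping.
 */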
134 static inline int nfs_get_user_pages(int rw, unsigned long user_addr, size_t size, struct page ***pages)
135 {
136 	int result = -ENOMEM;
137 	unsigned long page_count;
138 	size_t array_size;
139 
140 	page_count = (user_addr + size + PAGE_SIZE - 1) >> PAGE_SHIFT;
141 	page_count -= user_addr >> PAGE_SHIFT;
142 
143 	array_size = (page_count * sizeof(struct page *));
144 	*pages = kmalloc(array_size, GFP_KERNEL);
145 	if (*pages) {
146 		down_read(&current->mm->mmap_sem);
147 		result = get_user_pages(current, current->mm, user_addr,
148 					page_count, (rw == READ), 0,
149 					*pages, NULL);
150 		up_read(&current->mm->mmap_sem);
151 		if (result != page_count) {
152 			/*
153 			 * If we got fewer pages than expected from
154 			 * get_user_pages(), the user buffer runs off the
155 			 * end of a mapping; return EFAULT.
156 			 */
157 			if (result >= 0) {
158 				nfs_free_user_pages(*pages, result, 0);
159 				result = -EFAULT;
160 			} else
161 				kfree(*pages);
162 			*pages = NULL;
163 		}
164 	}
165 	return result;
166 }
167 
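/*
 * Allocate an nfs_direct_req from the slab cache and initialize its
 * lists, lock, completion, and counters.
 */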
168 static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
169 {
170 	struct nfs_direct_req *dreq;
171 
172 	dreq = kmem_cache_alloc(nfs_direct_cachep, SLAB_KERNEL);
173 	if (!dreq)
174 		return NULL;
175 
176 	kref_init(&dreq->kref);
177 	init_completion(&dreq->completion);
178 	INIT_LIST_HEAD(&dreq->list);
179 	INIT_LIST_HEAD(&dreq->rewrite_list);
180 	dreq->iocb = NULL;
181 	dreq->ctx = NULL;
182 	spin_lock_init(&dreq->lock);
183 	dreq->outstanding = 0;
184 	dreq->count = 0;
185 	dreq->error = 0;
186 	dreq->flags = 0;
187 
188 	return dreq;
189 }
190 
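/*
 * kref release callback: drop the open context reference and return the
 * nfs_direct_req to the slab cache.
 */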
191 static void nfs_direct_req_release(struct kref *kref)
192 {
193 	struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
194 
195 	if (dreq->ctx != NULL)
196 		put_nfs_open_context(dreq->ctx);
197 	kmem_cache_free(nfs_direct_cachep, dreq);
198 }
199 
200 /*
201  * Collects and returns the final error value/byte-count.
202  */
203 static ssize_t nfs_direct_wait(struct nfs_direct_req *dreq)
204 {
205 	ssize_t result = -EIOCBQUEUED;
206 
207 	/* Async requests don't wait here */
208 	if (dreq->iocb)
209 		goto out;
210 
211 	result = wait_for_completion_interruptible(&dreq->completion);
212 
213 	if (!result)
214 		result = dreq->error;
215 	if (!result)
216 		result = dreq->count;
217 
218 out:
219 	kref_put(&dreq->kref, nfs_direct_req_release);
220 	return (ssize_t) result;
221 }
222 
223 /*
224  * We must hold a reference to all the pages in this direct read request
225  * until the RPCs complete.  This could be long *after* we are woken up in
226  * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
227  *
228  * In addition, synchronous I/O uses a stack-allocated iocb.  Thus we
229  * can't trust the iocb is still valid here if this is a synchronous
230  * request.  If the waiter is woken prematurely, the iocb is long gone.
231  */
232 static void nfs_direct_complete(struct nfs_direct_req *dreq)
233 {
234 	nfs_free_user_pages(dreq->pages, dreq->npages, 1);
235 
236 	if (dreq->iocb) {
237 		long res = (long) dreq->error;
238 		if (!res)
239 			res = (long) dreq->count;
240 		aio_complete(dreq->iocb, res, 0);
241 	}
242 	complete_all(&dreq->completion);
243 
244 	kref_put(&dreq->kref, nfs_direct_req_release);
245 }
246 
247 /*
248  * Note that the number of outstanding requests is recorded in the dreq
249  * before any are dispatched.  This prevents races with I/O completion,
250  * so we always wait until every request has been dispatched and completed.
251  */
252 static struct nfs_direct_req *nfs_direct_read_alloc(size_t nbytes, size_t rsize)
253 {
254 	struct list_head *list;
255 	struct nfs_direct_req *dreq;
256 	unsigned int rpages = (rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
257 
258 	dreq = nfs_direct_req_alloc();
259 	if (!dreq)
260 		return NULL;
261 
262 	list = &dreq->list;
263 	for(;;) {
264 		struct nfs_read_data *data = nfs_readdata_alloc(rpages);
265 
266 		if (unlikely(!data)) {
267 			while (!list_empty(list)) {
268 				data = list_entry(list->next,
269 						  struct nfs_read_data, pages);
270 				list_del(&data->pages);
271 				nfs_readdata_free(data);
272 			}
273 			kref_put(&dreq->kref, nfs_direct_req_release);
274 			return NULL;
275 		}
276 
277 		INIT_LIST_HEAD(&data->pages);
278 		list_add(&data->pages, list);
279 
280 		data->req = (struct nfs_page *) dreq;
281 		dreq->outstanding++;
282 		if (nbytes <= rsize)
283 			break;
284 		nbytes -= rsize;
285 	}
286 	kref_get(&dreq->kref);
287 	return dreq;
288 }
289 
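/*
 * Completion callback for one direct READ.  Under dreq->lock, add the
 * bytes read to the running total or record the error; the request that
 * drops "outstanding" to zero completes the whole direct read.
 */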
290 static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
291 {
292 	struct nfs_read_data *data = calldata;
293 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
294 
295 	if (nfs_readpage_result(task, data) != 0)
296 		return;
297 
298 	spin_lock(&dreq->lock);
299 
300 	if (likely(task->tk_status >= 0))
301 		dreq->count += data->res.count;
302 	else
303 		dreq->error = task->tk_status;
304 
305 	if (--dreq->outstanding) {
306 		spin_unlock(&dreq->lock);
307 		return;
308 	}
309 
310 	spin_unlock(&dreq->lock);
311 	nfs_direct_complete(dreq);
312 }
313 
314 static const struct rpc_call_ops nfs_read_direct_ops = {
315 	.rpc_call_done = nfs_direct_read_result,
316 	.rpc_release = nfs_readdata_release,
317 };
318 
319 /*
320  * For each nfs_read_data struct that was allocated on the list, dispatch
321  * an NFS READ operation
322  */
323 static void nfs_direct_read_schedule(struct nfs_direct_req *dreq)
324 {
325 	struct nfs_open_context *ctx = dreq->ctx;
326 	struct inode *inode = ctx->dentry->d_inode;
327 	struct list_head *list = &dreq->list;
328 	struct page **pages = dreq->pages;
329 	size_t count = dreq->user_count;
330 	loff_t pos = dreq->pos;
331 	size_t rsize = NFS_SERVER(inode)->rsize;
332 	unsigned int curpage, pgbase;
333 
334 	curpage = 0;
335 	pgbase = dreq->user_addr & ~PAGE_MASK;
336 	do {
337 		struct nfs_read_data *data;
338 		size_t bytes;
339 
340 		bytes = rsize;
341 		if (count < rsize)
342 			bytes = count;
343 
344 		BUG_ON(list_empty(list));
345 		data = list_entry(list->next, struct nfs_read_data, pages);
346 		list_del_init(&data->pages);
347 
348 		data->inode = inode;
349 		data->cred = ctx->cred;
350 		data->args.fh = NFS_FH(inode);
351 		data->args.context = ctx;
352 		data->args.offset = pos;
353 		data->args.pgbase = pgbase;
354 		data->args.pages = &pages[curpage];
355 		data->args.count = bytes;
356 		data->res.fattr = &data->fattr;
357 		data->res.eof = 0;
358 		data->res.count = bytes;
359 
360 		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
361 				&nfs_read_direct_ops, data);
362 		NFS_PROTO(inode)->read_setup(data);
363 
364 		data->task.tk_cookie = (unsigned long) inode;
365 
366 		lock_kernel();
367 		rpc_execute(&data->task);
368 		unlock_kernel();
369 
370 		dfprintk(VFS, "NFS: %5u initiated direct read call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
371 				data->task.tk_pid,
372 				inode->i_sb->s_id,
373 				(long long)NFS_FILEID(inode),
374 				bytes,
375 				(unsigned long long)data->args.offset);
376 
377 		pos += bytes;
378 		pgbase += bytes;
379 		curpage += pgbase >> PAGE_SHIFT;
380 		pgbase &= ~PAGE_MASK;
381 
382 		count -= bytes;
383 	} while (count != 0);
384 	BUG_ON(!list_empty(list));
385 }
386 
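/*
 * Set up a dreq describing the user's buffer, dispatch the READ RPCs
 * with signals masked, and wait for the result.  Asynchronous (aio)
 * requests return -EIOCBQUEUED from nfs_direct_wait() instead of blocking.
 */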
387 static ssize_t nfs_direct_read(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, unsigned int nr_pages)
388 {
389 	ssize_t result;
390 	sigset_t oldset;
391 	struct inode *inode = iocb->ki_filp->f_mapping->host;
392 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
393 	struct nfs_direct_req *dreq;
394 
395 	dreq = nfs_direct_read_alloc(count, NFS_SERVER(inode)->rsize);
396 	if (!dreq)
397 		return -ENOMEM;
398 
399 	dreq->user_addr = user_addr;
400 	dreq->user_count = count;
401 	dreq->pos = pos;
402 	dreq->pages = pages;
403 	dreq->npages = nr_pages;
404 	dreq->inode = inode;
405 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
406 	if (!is_sync_kiocb(iocb))
407 		dreq->iocb = iocb;
408 
409 	nfs_add_stats(inode, NFSIOS_DIRECTREADBYTES, count);
410 	rpc_clnt_sigmask(clnt, &oldset);
411 	nfs_direct_read_schedule(dreq);
412 	result = nfs_direct_wait(dreq);
413 	rpc_clnt_sigunmask(clnt, &oldset);
414 
415 	return result;
416 }
417 
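/*
 * Free every nfs_write_data struct still held on the dreq's dispatch
 * and rewrite lists.
 */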
418 static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
419 {
420 	list_splice_init(&dreq->rewrite_list, &dreq->list);
421 	while (!list_empty(&dreq->list)) {
422 		struct nfs_write_data *data = list_entry(dreq->list.next, struct nfs_write_data, pages);
423 		list_del(&data->pages);
424 		nfs_writedata_release(data);
425 	}
426 }
427 
428 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
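/*
 * Resend all writes as FLUSH_STABLE after a failed commit or a changed
 * write verifier: move the saved nfs_write_data structs back onto the
 * dispatch list, reset the byte count, and schedule the WRITEs again.
 */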
429 static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
430 {
431 	struct list_head *pos;
432 
433 	list_splice_init(&dreq->rewrite_list, &dreq->list);
434 	list_for_each(pos, &dreq->list)
435 		dreq->outstanding++;
436 	dreq->count = 0;
437 
438 	nfs_direct_write_schedule(dreq, FLUSH_STABLE);
439 }
440 
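/*
 * Completion callback for the COMMIT RPC.  If the commit failed or the
 * server's write verifier changed, flag the writes for resending;
 * nfs_direct_write_complete() then decides what happens next.
 */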
441 static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
442 {
443 	struct nfs_write_data *data = calldata;
444 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
445 
446 	/* Call the NFS version-specific code */
447 	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
448 		return;
449 	if (unlikely(task->tk_status < 0)) {
450 		dreq->error = task->tk_status;
451 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
452 	}
453 	if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
454 		dprintk("NFS: %5u commit verify failed\n", task->tk_pid);
455 		dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
456 	}
457 
458 	dprintk("NFS: %5u commit returned %d\n", task->tk_pid, task->tk_status);
459 	nfs_direct_write_complete(dreq, data->inode);
460 }
461 
462 static const struct rpc_call_ops nfs_commit_direct_ops = {
463 	.rpc_call_done = nfs_direct_commit_result,
464 	.rpc_release = nfs_commit_release,
465 };
466 
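/*
 * Send a single COMMIT covering the entire direct write range so the
 * server flushes the unstably-written data to stable storage.
 */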
467 static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
468 {
469 	struct nfs_write_data *data = dreq->commit_data;
470 
471 	data->inode = dreq->inode;
472 	data->cred = dreq->ctx->cred;
473 
474 	data->args.fh = NFS_FH(data->inode);
475 	data->args.offset = dreq->pos;
476 	data->args.count = dreq->user_count;
477 	data->res.count = 0;
478 	data->res.fattr = &data->fattr;
479 	data->res.verf = &data->verf;
480 
481 	rpc_init_task(&data->task, NFS_CLIENT(dreq->inode), RPC_TASK_ASYNC,
482 				&nfs_commit_direct_ops, data);
483 	NFS_PROTO(data->inode)->commit_setup(data, 0);
484 
485 	data->task.tk_priority = RPC_PRIORITY_NORMAL;
486 	data->task.tk_cookie = (unsigned long)data->inode;
487 	/* Note: task.tk_ops->rpc_release will free dreq->commit_data */
488 	dreq->commit_data = NULL;
489 
490 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
491 
492 	lock_kernel();
493 	rpc_execute(&data->task);
494 	unlock_kernel();
495 }
496 
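/*
 * Called once the last WRITE or COMMIT has finished.  Depending on
 * dreq->flags, either issue a commit for unstable replies, resend the
 * writes after a verifier mismatch, or release resources and complete
 * the request.
 */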
497 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
498 {
499 	int flags = dreq->flags;
500 
501 	dreq->flags = 0;
502 	switch (flags) {
503 		case NFS_ODIRECT_DO_COMMIT:
504 			nfs_direct_commit_schedule(dreq);
505 			break;
506 		case NFS_ODIRECT_RESCHED_WRITES:
507 			nfs_direct_write_reschedule(dreq);
508 			break;
509 		default:
510 			nfs_end_data_update(inode);
511 			if (dreq->commit_data != NULL)
512 				nfs_commit_free(dreq->commit_data);
513 			nfs_direct_free_writedata(dreq);
514 			nfs_direct_complete(dreq);
515 	}
516 }
517 
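/*
 * Preallocate the write_data used for the final COMMIT.  If this fails,
 * commit_data is left NULL and the caller falls back to stable writes.
 */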
518 static void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
519 {
520 	dreq->commit_data = nfs_commit_alloc(0);
521 	if (dreq->commit_data != NULL)
522 		dreq->commit_data->req = (struct nfs_page *) dreq;
523 }
524 #else
525 static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq)
526 {
527 	dreq->commit_data = NULL;
528 }
529 
530 static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
531 {
532 	nfs_end_data_update(inode);
533 	nfs_direct_free_writedata(dreq);
534 	nfs_direct_complete(dreq);
535 }
536 #endif
537 
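/*
 * Allocate one nfs_write_data struct for each wsize-sized chunk of the
 * request, plus the commit data, recording the number of requests in
 * dreq->outstanding before any of them are dispatched.
 */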
538 static struct nfs_direct_req *nfs_direct_write_alloc(size_t nbytes, size_t wsize)
539 {
540 	struct list_head *list;
541 	struct nfs_direct_req *dreq;
542 	unsigned int wpages = (wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
543 
544 	dreq = nfs_direct_req_alloc();
545 	if (!dreq)
546 		return NULL;
547 
548 	list = &dreq->list;
549 	for(;;) {
550 		struct nfs_write_data *data = nfs_writedata_alloc(wpages);
551 
552 		if (unlikely(!data)) {
553 			while (!list_empty(list)) {
554 				data = list_entry(list->next,
555 						  struct nfs_write_data, pages);
556 				list_del(&data->pages);
557 				nfs_writedata_free(data);
558 			}
559 			kref_put(&dreq->kref, nfs_direct_req_release);
560 			return NULL;
561 		}
562 
563 		INIT_LIST_HEAD(&data->pages);
564 		list_add(&data->pages, list);
565 
566 		data->req = (struct nfs_page *) dreq;
567 		dreq->outstanding++;
568 		if (nbytes <= wsize)
569 			break;
570 		nbytes -= wsize;
571 	}
572 
573 	nfs_alloc_commit_data(dreq);
574 
575 	kref_get(&dreq->kref);
576 	return dreq;
577 }
578 
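/*
 * Completion callback for one direct WRITE.  Tally the bytes written or
 * record the error, and save the server's write verifier so unstable
 * replies can be committed later (or resent if the verifier changes).
 */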
579 static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
580 {
581 	struct nfs_write_data *data = calldata;
582 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
583 	int status = task->tk_status;
584 
585 	if (nfs_writeback_done(task, data) != 0)
586 		return;
587 
588 	spin_lock(&dreq->lock);
589 
590 	if (likely(status >= 0))
591 		dreq->count += data->res.count;
592 	else
593 		dreq->error = task->tk_status;
594 
595 	if (data->res.verf->committed != NFS_FILE_SYNC) {
596 		switch (dreq->flags) {
597 			case 0:
598 				memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
599 				dreq->flags = NFS_ODIRECT_DO_COMMIT;
600 				break;
601 			case NFS_ODIRECT_DO_COMMIT:
602 				if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
603 					dprintk("NFS: %5u write verify failed\n", task->tk_pid);
604 					dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
605 				}
606 		}
607 	}
608 	/* In case we have to resend */
609 	data->args.stable = NFS_FILE_SYNC;
610 
611 	spin_unlock(&dreq->lock);
612 }
613 
614 /*
615  * NB: Return the value of the first error return code.  Subsequent
616  *     errors after the first one are ignored.
617  */
618 static void nfs_direct_write_release(void *calldata)
619 {
620 	struct nfs_write_data *data = calldata;
621 	struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
622 
623 	spin_lock(&dreq->lock);
624 	if (--dreq->outstanding) {
625 		spin_unlock(&dreq->lock);
626 		return;
627 	}
628 	spin_unlock(&dreq->lock);
629 
630 	nfs_direct_write_complete(dreq, data->inode);
631 }
632 
633 static const struct rpc_call_ops nfs_write_direct_ops = {
634 	.rpc_call_done = nfs_direct_write_result,
635 	.rpc_release = nfs_direct_write_release,
636 };
637 
638 /*
639  * For each nfs_write_data struct that was allocated on the list, dispatch
640  * an NFS WRITE operation
641  */
642 static void nfs_direct_write_schedule(struct nfs_direct_req *dreq, int sync)
643 {
644 	struct nfs_open_context *ctx = dreq->ctx;
645 	struct inode *inode = ctx->dentry->d_inode;
646 	struct list_head *list = &dreq->list;
647 	struct page **pages = dreq->pages;
648 	size_t count = dreq->user_count;
649 	loff_t pos = dreq->pos;
650 	size_t wsize = NFS_SERVER(inode)->wsize;
651 	unsigned int curpage, pgbase;
652 
653 	curpage = 0;
654 	pgbase = dreq->user_addr & ~PAGE_MASK;
655 	do {
656 		struct nfs_write_data *data;
657 		size_t bytes;
658 
659 		bytes = wsize;
660 		if (count < wsize)
661 			bytes = count;
662 
663 		BUG_ON(list_empty(list));
664 		data = list_entry(list->next, struct nfs_write_data, pages);
665 		list_move_tail(&data->pages, &dreq->rewrite_list);
666 
667 		data->inode = inode;
668 		data->cred = ctx->cred;
669 		data->args.fh = NFS_FH(inode);
670 		data->args.context = ctx;
671 		data->args.offset = pos;
672 		data->args.pgbase = pgbase;
673 		data->args.pages = &pages[curpage];
674 		data->args.count = bytes;
675 		data->res.fattr = &data->fattr;
676 		data->res.count = bytes;
677 		data->res.verf = &data->verf;
678 
679 		rpc_init_task(&data->task, NFS_CLIENT(inode), RPC_TASK_ASYNC,
680 				&nfs_write_direct_ops, data);
681 		NFS_PROTO(inode)->write_setup(data, sync);
682 
683 		data->task.tk_priority = RPC_PRIORITY_NORMAL;
684 		data->task.tk_cookie = (unsigned long) inode;
685 
686 		lock_kernel();
687 		rpc_execute(&data->task);
688 		unlock_kernel();
689 
690 		dfprintk(VFS, "NFS: %5u initiated direct write call (req %s/%Ld, %zu bytes @ offset %Lu)\n",
691 				data->task.tk_pid,
692 				inode->i_sb->s_id,
693 				(long long)NFS_FILEID(inode),
694 				bytes,
695 				(unsigned long long)data->args.offset);
696 
697 		pos += bytes;
698 		pgbase += bytes;
699 		curpage += pgbase >> PAGE_SHIFT;
700 		pgbase &= ~PAGE_MASK;
701 
702 		count -= bytes;
703 	} while (count != 0);
704 	BUG_ON(!list_empty(list));
705 }
706 
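/*
 * Set up the dreq, choose between stable and unstable writes (stable when
 * no commit data could be allocated or the request is smaller than wsize),
 * dispatch the WRITE RPCs with signals masked, and wait for completion.
 */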
707 static ssize_t nfs_direct_write(struct kiocb *iocb, unsigned long user_addr, size_t count, loff_t pos, struct page **pages, int nr_pages)
708 {
709 	ssize_t result;
710 	sigset_t oldset;
711 	struct inode *inode = iocb->ki_filp->f_mapping->host;
712 	struct rpc_clnt *clnt = NFS_CLIENT(inode);
713 	struct nfs_direct_req *dreq;
714 	size_t wsize = NFS_SERVER(inode)->wsize;
715 	int sync = 0;
716 
717 	dreq = nfs_direct_write_alloc(count, wsize);
718 	if (!dreq)
719 		return -ENOMEM;
720 	if (dreq->commit_data == NULL || count < wsize)
721 		sync = FLUSH_STABLE;
722 
723 	dreq->user_addr = user_addr;
724 	dreq->user_count = count;
725 	dreq->pos = pos;
726 	dreq->pages = pages;
727 	dreq->npages = nr_pages;
728 	dreq->inode = inode;
729 	dreq->ctx = get_nfs_open_context((struct nfs_open_context *)iocb->ki_filp->private_data);
730 	if (!is_sync_kiocb(iocb))
731 		dreq->iocb = iocb;
732 
733 	nfs_add_stats(inode, NFSIOS_DIRECTWRITTENBYTES, count);
734 
735 	nfs_begin_data_update(inode);
736 
737 	rpc_clnt_sigmask(clnt, &oldset);
738 	nfs_direct_write_schedule(dreq, sync);
739 	result = nfs_direct_wait(dreq);
740 	rpc_clnt_sigunmask(clnt, &oldset);
741 
742 	return result;
743 }
744 
745 /**
746  * nfs_file_direct_read - file direct read operation for NFS files
747  * @iocb: target I/O control block
748  * @buf: user's buffer into which to read data
749  * @count: number of bytes to read
750  * @pos: byte offset in file where reading starts
751  *
752  * We use this function for direct reads instead of calling
753  * generic_file_aio_read() in order to avoid gfar's check to see if
754  * the request starts before the end of the file.  For that check
755  * to work, we must generate a GETATTR before each direct read, and
756  * even then there is a window between the GETATTR and the subsequent
757  * READ where the file size could change.  Our preference is simply
758  * to do all reads the application wants, and the server will take
759  * care of managing the end of file boundary.
760  *
761  * This function also eliminates unnecessarily updating the file's
762  * atime locally, as the NFS server sets the file's atime, and this
763  * client must read the updated atime from the server back into its
764  * cache.
765  */
766 ssize_t nfs_file_direct_read(struct kiocb *iocb, char __user *buf, size_t count, loff_t pos)
767 {
768 	ssize_t retval = -EINVAL;
769 	int page_count;
770 	struct page **pages;
771 	struct file *file = iocb->ki_filp;
772 	struct address_space *mapping = file->f_mapping;
773 
774 	dprintk("nfs: direct read(%s/%s, %lu@%Ld)\n",
775 		file->f_dentry->d_parent->d_name.name,
776 		file->f_dentry->d_name.name,
777 		(unsigned long) count, (long long) pos);
778 
779 	if ((ssize_t) count < 0)
780 		goto out;
781 	retval = -EFAULT;
782 	if (!access_ok(VERIFY_WRITE, buf, count))
783 		goto out;
784 	retval = 0;
785 	if (!count)
786 		goto out;
787 
788 	retval = nfs_sync_mapping(mapping);
789 	if (retval)
790 		goto out;
791 
792 	retval = nfs_get_user_pages(READ, (unsigned long) buf,
793 						count, &pages);
794 	if (retval < 0)
795 		goto out;
796 	page_count = retval;
797 
798 	retval = nfs_direct_read(iocb, (unsigned long) buf, count, pos,
799 						pages, page_count);
800 	if (retval > 0)
801 		iocb->ki_pos = pos + retval;
802 
803 out:
804 	return retval;
805 }
806 
807 /**
808  * nfs_file_direct_write - file direct write operation for NFS files
809  * @iocb: target I/O control block
810  * @buf: user's buffer from which to write data
811  * @count: number of bytes to write
812  * @pos: byte offset in file where writing starts
813  *
814  * We use this function for direct writes instead of calling
815  * generic_file_aio_write() in order to avoid taking the inode
816  * semaphore and updating the i_size.  The NFS server will set
817  * the new i_size and this client must read the updated size
818  * back into its cache.  We let the server do generic write
819  * parameter checking and report problems.
820  *
821  * We also avoid an unnecessary invocation of generic_osync_inode(),
822  * as it is fairly meaningless to sync the metadata of an NFS file.
823  *
824  * We eliminate local atime updates, see direct read above.
825  *
826  * We avoid unnecessary page cache invalidations for normal cached
827  * readers of this file.
828  *
829  * Note that O_APPEND is not supported for NFS direct writes, as there
830  * is no atomic O_APPEND write facility in the NFS protocol.
831  */
832 ssize_t nfs_file_direct_write(struct kiocb *iocb, const char __user *buf, size_t count, loff_t pos)
833 {
834 	ssize_t retval;
835 	int page_count;
836 	struct page **pages;
837 	struct file *file = iocb->ki_filp;
838 	struct address_space *mapping = file->f_mapping;
839 
840 	dfprintk(VFS, "nfs: direct write(%s/%s, %lu@%Ld)\n",
841 		file->f_dentry->d_parent->d_name.name,
842 		file->f_dentry->d_name.name,
843 		(unsigned long) count, (long long) pos);
844 
845 	retval = generic_write_checks(file, &pos, &count, 0);
846 	if (retval)
847 		goto out;
848 
849 	retval = -EINVAL;
850 	if ((ssize_t) count < 0)
851 		goto out;
852 	retval = 0;
853 	if (!count)
854 		goto out;
855 
856 	retval = -EFAULT;
857 	if (!access_ok(VERIFY_READ, buf, count))
858 		goto out;
859 
860 	retval = nfs_sync_mapping(mapping);
861 	if (retval)
862 		goto out;
863 
864 	retval = nfs_get_user_pages(WRITE, (unsigned long) buf,
865 						count, &pages);
866 	if (retval < 0)
867 		goto out;
868 	page_count = retval;
869 
870 	retval = nfs_direct_write(iocb, (unsigned long) buf, count,
871 					pos, pages, page_count);
872 
873 	/*
874 	 * XXX: nfs_end_data_update() already ensures this file's
875 	 *      cached data is subsequently invalidated.  Do we really
876 	 *      need to call invalidate_inode_pages2() again here?
877 	 *
878 	 *      For aio writes, this invalidation will almost certainly
879 	 *      occur before the writes complete.  Kind of racey.
880 	 */
881 	if (mapping->nrpages)
882 		invalidate_inode_pages2(mapping);
883 
884 	if (retval > 0)
885 		iocb->ki_pos = pos + retval;
886 
887 out:
888 	return retval;
889 }
890 
891 /**
892  * nfs_init_directcache - create a slab cache for nfs_direct_req structures
893  *
894  */
895 int nfs_init_directcache(void)
896 {
897 	nfs_direct_cachep = kmem_cache_create("nfs_direct_cache",
898 						sizeof(struct nfs_direct_req),
899 						0, (SLAB_RECLAIM_ACCOUNT|
900 							SLAB_MEM_SPREAD),
901 						NULL, NULL);
902 	if (nfs_direct_cachep == NULL)
903 		return -ENOMEM;
904 
905 	return 0;
906 }
907 
908 /**
909  * nfs_destroy_directcache - destroy the slab cache for nfs_direct_req structures
910  *
911  */
912 void nfs_destroy_directcache(void)
913 {
914 	if (kmem_cache_destroy(nfs_direct_cachep))
915 		printk(KERN_INFO "nfs_direct_cache: not all structures were freed\n");
916 }
917