xref: /linux/fs/nfs/write.c (revision eb2bce7f5e7ac1ca6da434461217fadf3c688d2c)
1 /*
2  * linux/fs/nfs/write.c
3  *
4  * Write file data over NFS.
5  *
6  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
7  */
8 
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/mm.h>
12 #include <linux/pagemap.h>
13 #include <linux/file.h>
14 #include <linux/writeback.h>
15 #include <linux/swap.h>
16 
17 #include <linux/sunrpc/clnt.h>
18 #include <linux/nfs_fs.h>
19 #include <linux/nfs_mount.h>
20 #include <linux/nfs_page.h>
21 #include <linux/backing-dev.h>
22 
23 #include <asm/uaccess.h>
24 #include <linux/smp_lock.h>
25 
26 #include "delegation.h"
27 #include "internal.h"
28 #include "iostat.h"
29 
30 #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
31 
32 #define MIN_POOL_WRITE		(32)
33 #define MIN_POOL_COMMIT		(4)
34 
35 /*
36  * Local function declarations
37  */
38 static struct nfs_page * nfs_update_request(struct nfs_open_context*,
39 					    struct page *,
40 					    unsigned int, unsigned int);
41 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
42 				  struct inode *inode, int ioflags);
43 static const struct rpc_call_ops nfs_write_partial_ops;
44 static const struct rpc_call_ops nfs_write_full_ops;
45 static const struct rpc_call_ops nfs_commit_ops;
46 
47 static struct kmem_cache *nfs_wdata_cachep;
48 static mempool_t *nfs_wdata_mempool;
49 static mempool_t *nfs_commit_mempool;
50 
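/*
 * Allocate a commit descriptor from its dedicated mempool.  GFP_NOFS
 * keeps the allocator from recursing into the filesystem while we are
 * already in the middle of writing pages back.
 */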
51 struct nfs_write_data *nfs_commit_alloc(void)
52 {
53 	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
54 
55 	if (p) {
56 		memset(p, 0, sizeof(*p));
57 		INIT_LIST_HEAD(&p->pages);
58 	}
59 	return p;
60 }
61 
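/*
 * RCU callback that does the actual freeing of a commit descriptor.
 * The embedded rpc_task may still be referenced under
 * rcu_read_lock_bh(), so nfs_commit_free() below defers the release
 * via call_rcu_bh() until a BH grace period has elapsed.
 */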
62 void nfs_commit_rcu_free(struct rcu_head *head)
63 {
64 	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
65 	if (p && (p->pagevec != &p->page_array[0]))
66 		kfree(p->pagevec);
67 	mempool_free(p, nfs_commit_mempool);
68 }
69 
70 void nfs_commit_free(struct nfs_write_data *wdata)
71 {
72 	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_commit_rcu_free);
73 }
74 
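/*
 * Allocate a write descriptor big enough for @pagecount pages.  Small
 * I/Os use the page_array embedded in the descriptor; larger ones get
 * a separately allocated page vector.
 */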
75 struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
76 {
77 	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
78 
79 	if (p) {
80 		memset(p, 0, sizeof(*p));
81 		INIT_LIST_HEAD(&p->pages);
82 		p->npages = pagecount;
83 		if (pagecount <= ARRAY_SIZE(p->page_array))
84 			p->pagevec = p->page_array;
85 		else {
86 			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
87 			if (!p->pagevec) {
88 				mempool_free(p, nfs_wdata_mempool);
89 				p = NULL;
90 			}
91 		}
92 	}
93 	return p;
94 }
95 
96 static void nfs_writedata_rcu_free(struct rcu_head *head)
97 {
98 	struct nfs_write_data *p = container_of(head, struct nfs_write_data, task.u.tk_rcu);
99 	if (p && (p->pagevec != &p->page_array[0]))
100 		kfree(p->pagevec);
101 	mempool_free(p, nfs_wdata_mempool);
102 }
103 
104 static void nfs_writedata_free(struct nfs_write_data *wdata)
105 {
106 	call_rcu_bh(&wdata->task.u.tk_rcu, nfs_writedata_rcu_free);
107 }
108 
109 void nfs_writedata_release(void *wdata)
110 {
111 	nfs_writedata_free(wdata);
112 }
113 
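/*
 * Look up the write request attached to @page through page_private()
 * and take a reference to it.  The caller must hold the inode's
 * req_lock.
 */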
114 static struct nfs_page *nfs_page_find_request_locked(struct page *page)
115 {
116 	struct nfs_page *req = NULL;
117 
118 	if (PagePrivate(page)) {
119 		req = (struct nfs_page *)page_private(page);
120 		if (req != NULL)
121 			atomic_inc(&req->wb_count);
122 	}
123 	return req;
124 }
125 
126 static struct nfs_page *nfs_page_find_request(struct page *page)
127 {
128 	struct nfs_page *req = NULL;
129 	spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
130 
131 	spin_lock(req_lock);
132 	req = nfs_page_find_request_locked(page);
133 	spin_unlock(req_lock);
134 	return req;
135 }
136 
137 /* Adjust the file length if we're writing beyond the end */
138 static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
139 {
140 	struct inode *inode = page->mapping->host;
141 	loff_t end, i_size = i_size_read(inode);
142 	pgoff_t end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
143 
144 	if (i_size > 0 && page->index < end_index)
145 		return;
146 	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
147 	if (i_size >= end)
148 		return;
149 	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
150 	i_size_write(inode, end);
151 }
152 
153 /* A writeback failed: mark the page as bad, and invalidate the page cache */
154 static void nfs_set_pageerror(struct page *page)
155 {
156 	SetPageError(page);
157 	nfs_zap_mapping(page->mapping->host, page->mapping);
158 }
159 
160 /* We can set the PG_uptodate flag if we see that a write request
161  * covers the full valid length of the page (the remainder is zeroed).
162  */
163 static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
164 {
165 	if (PageUptodate(page))
166 		return;
167 	if (base != 0)
168 		return;
169 	if (count != nfs_page_length(page))
170 		return;
171 	if (count != PAGE_CACHE_SIZE)
172 		memclear_highpage_flush(page, count, PAGE_CACHE_SIZE - count);
173 	SetPageUptodate(page);
174 }
175 
176 static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
177 		unsigned int offset, unsigned int count)
178 {
179 	struct nfs_page	*req;
180 	int ret;
181 
182 	for (;;) {
183 		req = nfs_update_request(ctx, page, offset, count);
184 		if (!IS_ERR(req))
185 			break;
186 		ret = PTR_ERR(req);
187 		if (ret != -EBUSY)
188 			return ret;
189 		ret = nfs_wb_page(page->mapping->host, page);
190 		if (ret != 0)
191 			return ret;
192 	}
193 	/* Update file length */
194 	nfs_grow_file(page, offset, count);
195 	/* Set the PG_uptodate flag? */
196 	nfs_mark_uptodate(page, offset, count);
197 	nfs_unlock_request(req);
198 	return 0;
199 }
200 
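/*
 * Map the VM's writeback_control onto an NFS flush priority:
 * reclaim-driven writeback needs to make progress immediately (and
 * stably), whereas kupdate-style background flushing can wait.
 */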
201 static int wb_priority(struct writeback_control *wbc)
202 {
203 	if (wbc->for_reclaim)
204 		return FLUSH_HIGHPRI | FLUSH_STABLE;
205 	if (wbc->for_kupdate)
206 		return FLUSH_LOWPRI;
207 	return 0;
208 }
209 
210 /*
211  * NFS congestion control
212  */
213 
214 int nfs_congestion_kb;
215 
216 #define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
217 #define NFS_CONGESTION_OFF_THRESH	\
218 	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
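/*
 * Worked example, assuming 4k pages (PAGE_SHIFT == 12) and
 * nfs_congestion_kb == 16384: the "on" threshold is 16384 >> 2 == 4096
 * pages under writeback, and the "off" threshold is 4096 - 4096/4 ==
 * 3072 pages, which gives the congestion flag some hysteresis.
 */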
219 
220 static int nfs_set_page_writeback(struct page *page)
221 {
222 	int ret = test_set_page_writeback(page);
223 
224 	if (!ret) {
225 		struct inode *inode = page->mapping->host;
226 		struct nfs_server *nfss = NFS_SERVER(inode);
227 
228 		if (atomic_inc_return(&nfss->writeback) >
229 				NFS_CONGESTION_ON_THRESH)
230 			set_bdi_congested(&nfss->backing_dev_info, WRITE);
231 	}
232 	return ret;
233 }
234 
235 static void nfs_end_page_writeback(struct page *page)
236 {
237 	struct inode *inode = page->mapping->host;
238 	struct nfs_server *nfss = NFS_SERVER(inode);
239 
240 	end_page_writeback(page);
241 	if (atomic_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) {
242 		clear_bdi_congested(&nfss->backing_dev_info, WRITE);
243 		congestion_end(WRITE);
244 	}
245 }
246 
247 /*
248  * Find an associated nfs write request, and prepare to flush it out
249  * Returns 1 if there was no write request, or if the request was
250  * already tagged by nfs_set_page_dirty. Returns 0 if the request
251  * was not tagged.
252  * May also return an error if the user signalled nfs_wait_on_request().
253  */
254 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
255 				struct page *page)
256 {
257 	struct nfs_page *req;
258 	struct nfs_inode *nfsi = NFS_I(page->mapping->host);
259 	spinlock_t *req_lock = &nfsi->req_lock;
260 	int ret;
261 
262 	spin_lock(req_lock);
263 	for (;;) {
264 		req = nfs_page_find_request_locked(page);
265 		if (req == NULL) {
266 			spin_unlock(req_lock);
267 			return 1;
268 		}
269 		if (nfs_lock_request_dontget(req))
270 			break;
271 		/* Note: If we hold the page lock, as is the case in nfs_writepage,
272 		 *	 then the call to nfs_lock_request_dontget() will always
273 		 *	 succeed provided that someone hasn't already marked the
274 		 *	 request as dirty (in which case we don't care).
275 		 */
276 		spin_unlock(req_lock);
277 		/* Prevent deadlock! */
278 		nfs_pageio_complete(pgio);
279 		ret = nfs_wait_on_request(req);
280 		nfs_release_request(req);
281 		if (ret != 0)
282 			return ret;
283 		spin_lock(req_lock);
284 	}
285 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
286 		/* This request is marked for commit */
287 		spin_unlock(req_lock);
288 		nfs_unlock_request(req);
289 		nfs_pageio_complete(pgio);
290 		return 1;
291 	}
292 	if (nfs_set_page_writeback(page) != 0) {
293 		spin_unlock(req_lock);
294 		BUG();
295 	}
296 	radix_tree_tag_set(&nfsi->nfs_page_tree, req->wb_index,
297 			NFS_PAGE_TAG_WRITEBACK);
298 	ret = test_bit(PG_NEED_FLUSH, &req->wb_flags);
299 	spin_unlock(req_lock);
300 	nfs_pageio_add_request(pgio, req);
301 	return ret;
302 }
303 
304 /*
305  * Write an mmapped page to the server.
306  */
307 static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
308 {
309 	struct nfs_pageio_descriptor mypgio, *pgio;
310 	struct nfs_open_context *ctx;
311 	struct inode *inode = page->mapping->host;
312 	unsigned offset;
313 	int err;
314 
315 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
316 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
317 
318 	if (wbc->for_writepages)
319 		pgio = wbc->fs_private;
320 	else {
321 		nfs_pageio_init_write(&mypgio, inode, wb_priority(wbc));
322 		pgio = &mypgio;
323 	}
324 
325 	err = nfs_page_async_flush(pgio, page);
326 	if (err <= 0)
327 		goto out;
328 	err = 0;
329 	offset = nfs_page_length(page);
330 	if (!offset)
331 		goto out;
332 
333 	ctx = nfs_find_open_context(inode, NULL, FMODE_WRITE);
334 	if (ctx == NULL) {
335 		err = -EBADF;
336 		goto out;
337 	}
338 	err = nfs_writepage_setup(ctx, page, 0, offset);
339 	put_nfs_open_context(ctx);
340 	if (err != 0)
341 		goto out;
342 	err = nfs_page_async_flush(pgio, page);
343 	if (err > 0)
344 		err = 0;
345 out:
346 	if (!wbc->for_writepages)
347 		nfs_pageio_complete(pgio);
348 	return err;
349 }
350 
351 int nfs_writepage(struct page *page, struct writeback_control *wbc)
352 {
353 	int err;
354 
355 	err = nfs_writepage_locked(page, wbc);
356 	unlock_page(page);
357 	return err;
358 }
359 
360 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
361 {
362 	struct inode *inode = mapping->host;
363 	struct nfs_pageio_descriptor pgio;
364 	int err;
365 
366 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
367 
368 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
369 	wbc->fs_private = &pgio;
370 	err = generic_writepages(mapping, wbc);
371 	nfs_pageio_complete(&pgio);
372 	if (err)
373 		return err;
374 	if (pgio.pg_error)
375 		return pgio.pg_error;
376 	return 0;
377 }
378 
379 /*
380  * Insert a write request into an inode
381  */
382 static int nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
383 {
384 	struct nfs_inode *nfsi = NFS_I(inode);
385 	int error;
386 
387 	error = radix_tree_insert(&nfsi->nfs_page_tree, req->wb_index, req);
388 	BUG_ON(error == -EEXIST);
389 	if (error)
390 		return error;
391 	if (!nfsi->npages) {
392 		igrab(inode);
393 		nfs_begin_data_update(inode);
394 		if (nfs_have_delegation(inode, FMODE_WRITE))
395 			nfsi->change_attr++;
396 	}
397 	SetPagePrivate(req->wb_page);
398 	set_page_private(req->wb_page, (unsigned long)req);
399 	if (PageDirty(req->wb_page))
400 		set_bit(PG_NEED_FLUSH, &req->wb_flags);
401 	nfsi->npages++;
402 	atomic_inc(&req->wb_count);
403 	return 0;
404 }
405 
406 /*
407  * Remove a write request from an inode
408  */
409 static void nfs_inode_remove_request(struct nfs_page *req)
410 {
411 	struct inode *inode = req->wb_context->dentry->d_inode;
412 	struct nfs_inode *nfsi = NFS_I(inode);
413 
414 	BUG_ON(!NFS_WBACK_BUSY(req));
415 
416 	spin_lock(&nfsi->req_lock);
417 	set_page_private(req->wb_page, 0);
418 	ClearPagePrivate(req->wb_page);
419 	radix_tree_delete(&nfsi->nfs_page_tree, req->wb_index);
420 	if (test_and_clear_bit(PG_NEED_FLUSH, &req->wb_flags))
421 		__set_page_dirty_nobuffers(req->wb_page);
422 	nfsi->npages--;
423 	if (!nfsi->npages) {
424 		spin_unlock(&nfsi->req_lock);
425 		nfs_end_data_update(inode);
426 		iput(inode);
427 	} else
428 		spin_unlock(&nfsi->req_lock);
429 	nfs_clear_request(req);
430 	nfs_release_request(req);
431 }
432 
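/*
 * Redirty the page so that a later writeback pass will retry the
 * request.
 */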
433 static void
434 nfs_redirty_request(struct nfs_page *req)
435 {
436 	__set_page_dirty_nobuffers(req->wb_page);
437 }
438 
439 /*
440  * Check if a request is dirty
441  */
442 static inline int
443 nfs_dirty_request(struct nfs_page *req)
444 {
445 	struct page *page = req->wb_page;
446 
447 	if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
448 		return 0;
449 	return !PageWriteback(page);
450 }
451 
452 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
453 /*
454  * Add a request to the inode's commit list.
455  */
456 static void
457 nfs_mark_request_commit(struct nfs_page *req)
458 {
459 	struct inode *inode = req->wb_context->dentry->d_inode;
460 	struct nfs_inode *nfsi = NFS_I(inode);
461 
462 	spin_lock(&nfsi->req_lock);
463 	nfs_list_add_request(req, &nfsi->commit);
464 	nfsi->ncommit++;
465 	set_bit(PG_NEED_COMMIT, &(req)->wb_flags);
466 	spin_unlock(&nfsi->req_lock);
467 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
468 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
469 }
470 
471 static inline
472 int nfs_write_need_commit(struct nfs_write_data *data)
473 {
474 	return data->verf.committed != NFS_FILE_SYNC;
475 }
476 
477 static inline
478 int nfs_reschedule_unstable_write(struct nfs_page *req)
479 {
480 	if (test_bit(PG_NEED_COMMIT, &req->wb_flags)) {
481 		nfs_mark_request_commit(req);
482 		return 1;
483 	}
484 	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
485 		nfs_redirty_request(req);
486 		return 1;
487 	}
488 	return 0;
489 }
490 #else
491 static inline void
492 nfs_mark_request_commit(struct nfs_page *req)
493 {
494 }
495 
496 static inline
497 int nfs_write_need_commit(struct nfs_write_data *data)
498 {
499 	return 0;
500 }
501 
502 static inline
503 int nfs_reschedule_unstable_write(struct nfs_page *req)
504 {
505 	return 0;
506 }
507 #endif
508 
509 /*
510  * Wait for a request to complete.
511  *
512  * Interruptible by signals only if mounted with intr flag.
513  */
514 static int nfs_wait_on_requests_locked(struct inode *inode, pgoff_t idx_start, unsigned int npages)
515 {
516 	struct nfs_inode *nfsi = NFS_I(inode);
517 	struct nfs_page *req;
518 	pgoff_t idx_end, next;
519 	unsigned int		res = 0;
520 	int			error;
521 
522 	if (npages == 0)
523 		idx_end = ~0;
524 	else
525 		idx_end = idx_start + npages - 1;
526 
527 	next = idx_start;
528 	while (radix_tree_gang_lookup_tag(&nfsi->nfs_page_tree, (void **)&req, next, 1, NFS_PAGE_TAG_WRITEBACK)) {
529 		if (req->wb_index > idx_end)
530 			break;
531 
532 		next = req->wb_index + 1;
533 		BUG_ON(!NFS_WBACK_BUSY(req));
534 
535 		atomic_inc(&req->wb_count);
536 		spin_unlock(&nfsi->req_lock);
537 		error = nfs_wait_on_request(req);
538 		nfs_release_request(req);
539 		spin_lock(&nfsi->req_lock);
540 		if (error < 0)
541 			return error;
542 		res++;
543 	}
544 	return res;
545 }
546 
547 static void nfs_cancel_commit_list(struct list_head *head)
548 {
549 	struct nfs_page *req;
550 
551 	while (!list_empty(head)) {
552 		req = nfs_list_entry(head->next);
553 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
554 		nfs_list_remove_request(req);
555 		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
556 		nfs_inode_remove_request(req);
557 		nfs_unlock_request(req);
558 	}
559 }
560 
561 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
562 /*
563  * nfs_scan_commit - Scan an inode for commit requests
564  * @inode: NFS inode to scan
565  * @dst: destination list
566  * @idx_start: lower bound of page->index to scan.
567  * @npages: idx_start + npages sets the upper bound to scan.
568  *
569  * Moves requests from the inode's 'commit' request list.
570  * The requests are *not* checked to ensure that they form a contiguous set.
571  */
572 static int
573 nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
574 {
575 	struct nfs_inode *nfsi = NFS_I(inode);
576 	int res = 0;
577 
578 	if (nfsi->ncommit != 0) {
579 		res = nfs_scan_list(nfsi, &nfsi->commit, dst, idx_start, npages);
580 		nfsi->ncommit -= res;
581 		if ((nfsi->ncommit == 0) != list_empty(&nfsi->commit))
582 			printk(KERN_ERR "NFS: desynchronized value of nfs_i.ncommit.\n");
583 	}
584 	return res;
585 }
586 #else
587 static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst, pgoff_t idx_start, unsigned int npages)
588 {
589 	return 0;
590 }
591 #endif
592 
593 /*
594  * Try to update any existing write request, or create one if there is none.
595  * In order to match, the request's credentials must match those of
596  * the calling process.
597  *
598  * Note: Should always be called with the Page Lock held!
599  */
600 static struct nfs_page * nfs_update_request(struct nfs_open_context* ctx,
601 		struct page *page, unsigned int offset, unsigned int bytes)
602 {
603 	struct address_space *mapping = page->mapping;
604 	struct inode *inode = mapping->host;
605 	struct nfs_inode *nfsi = NFS_I(inode);
606 	struct nfs_page		*req, *new = NULL;
607 	pgoff_t		rqend, end;
608 
609 	end = offset + bytes;
610 
611 	for (;;) {
612 		/* Loop over all inode entries and see if we find
613 		 * a request for the page we wish to update
614 		 */
615 		spin_lock(&nfsi->req_lock);
616 		req = nfs_page_find_request_locked(page);
617 		if (req) {
618 			if (!nfs_lock_request_dontget(req)) {
619 				int error;
620 
621 				spin_unlock(&nfsi->req_lock);
622 				error = nfs_wait_on_request(req);
623 				nfs_release_request(req);
624 				if (error < 0) {
625 					if (new)
626 						nfs_release_request(new);
627 					return ERR_PTR(error);
628 				}
629 				continue;
630 			}
631 			spin_unlock(&nfsi->req_lock);
632 			if (new)
633 				nfs_release_request(new);
634 			break;
635 		}
636 
637 		if (new) {
638 			int error;
639 			nfs_lock_request_dontget(new);
640 			error = nfs_inode_add_request(inode, new);
641 			if (error) {
642 				spin_unlock(&nfsi->req_lock);
643 				nfs_unlock_request(new);
644 				return ERR_PTR(error);
645 			}
646 			spin_unlock(&nfsi->req_lock);
647 			return new;
648 		}
649 		spin_unlock(&nfsi->req_lock);
650 
651 		new = nfs_create_request(ctx, inode, page, offset, bytes);
652 		if (IS_ERR(new))
653 			return new;
654 	}
655 
656 	/* We have a request for our page.
657 	 * If the creds don't match, or the
658 	 * page addresses don't match,
659 	 * tell the caller to wait on the conflicting
660 	 * request.
661 	 */
662 	rqend = req->wb_offset + req->wb_bytes;
663 	if (req->wb_context != ctx
664 	    || req->wb_page != page
665 	    || !nfs_dirty_request(req)
666 	    || offset > rqend || end < req->wb_offset) {
667 		nfs_unlock_request(req);
668 		return ERR_PTR(-EBUSY);
669 	}
670 
671 	/* Okay, the request matches. Update the region */
672 	if (offset < req->wb_offset) {
673 		req->wb_offset = offset;
674 		req->wb_pgbase = offset;
675 		req->wb_bytes = rqend - req->wb_offset;
676 	}
677 
678 	if (end > rqend)
679 		req->wb_bytes = end - req->wb_offset;
680 
681 	return req;
682 }
683 
684 int nfs_flush_incompatible(struct file *file, struct page *page)
685 {
686 	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
687 	struct nfs_page	*req;
688 	int do_flush, status;
689 	/*
690 	 * Look for a request corresponding to this page. If there
691 	 * is one, and it belongs to another file, we flush it out
692 	 * before we try to copy anything into the page. Do this
693 	 * due to the lack of an ACCESS-type call in NFSv2.
694 	 * Also do the same if we find a request belonging to a page
695 	 * that has since been dropped from the page cache.
696 	 */
697 	do {
698 		req = nfs_page_find_request(page);
699 		if (req == NULL)
700 			return 0;
701 		do_flush = req->wb_page != page || req->wb_context != ctx
702 			|| !nfs_dirty_request(req);
703 		nfs_release_request(req);
704 		if (!do_flush)
705 			return 0;
706 		status = nfs_wb_page(page->mapping->host, page);
707 	} while (status == 0);
708 	return status;
709 }
710 
711 /*
712  * Update and possibly write a cached page of an NFS file.
713  *
714  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
715  * things with a page scheduled for an RPC call (e.g. invalidate it).
716  */
717 int nfs_updatepage(struct file *file, struct page *page,
718 		unsigned int offset, unsigned int count)
719 {
720 	struct nfs_open_context *ctx = (struct nfs_open_context *)file->private_data;
721 	struct inode	*inode = page->mapping->host;
722 	int		status = 0;
723 
724 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
725 
726 	dprintk("NFS:      nfs_updatepage(%s/%s %d@%Ld)\n",
727 		file->f_path.dentry->d_parent->d_name.name,
728 		file->f_path.dentry->d_name.name, count,
729 		(long long)(page_offset(page) + offset));
730 
731 	/* If we're not using byte range locks, and we know the page
732 	 * is entirely in cache, it may be more efficient to avoid
733 	 * fragmenting write requests.
734 	 */
735 	if (PageUptodate(page) && inode->i_flock == NULL && !(file->f_flags & O_SYNC)) {
736 		count = max(count + offset, nfs_page_length(page));
737 		offset = 0;
738 	}
739 
740 	status = nfs_writepage_setup(ctx, page, offset, count);
741 	__set_page_dirty_nobuffers(page);
742 
743 	dprintk("NFS:      nfs_updatepage returns %d (isize %Ld)\n",
744 			status, (long long)i_size_read(inode));
745 	if (status < 0)
746 		nfs_set_pageerror(page);
747 	return status;
748 }
749 
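/*
 * A WRITE for this request has completed: either hand the request over
 * to the commit/redirty machinery, or (on error, or once the data is
 * safely on disk) remove it from the inode.  Either way the page
 * leaves writeback state.
 */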
750 static void nfs_writepage_release(struct nfs_page *req)
751 {
752 
753 	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req)) {
754 		nfs_end_page_writeback(req->wb_page);
755 		nfs_inode_remove_request(req);
756 	} else
757 		nfs_end_page_writeback(req->wb_page);
758 	nfs_clear_page_writeback(req);
759 }
760 
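/* Translate the FLUSH_* flags into an RPC scheduling priority. */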
761 static inline int flush_task_priority(int how)
762 {
763 	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
764 		case FLUSH_HIGHPRI:
765 			return RPC_PRIORITY_HIGH;
766 		case FLUSH_LOWPRI:
767 			return RPC_PRIORITY_LOW;
768 	}
769 	return RPC_PRIORITY_NORMAL;
770 }
771 
772 /*
773  * Set up the argument/result storage required for the RPC call.
774  */
775 static void nfs_write_rpcsetup(struct nfs_page *req,
776 		struct nfs_write_data *data,
777 		const struct rpc_call_ops *call_ops,
778 		unsigned int count, unsigned int offset,
779 		int how)
780 {
781 	struct inode		*inode;
782 	int flags;
783 
784 	/* Set up the RPC argument and reply structs
785 	 * NB: take care not to mess about with data->commit et al. */
786 
787 	data->req = req;
788 	data->inode = inode = req->wb_context->dentry->d_inode;
789 	data->cred = req->wb_context->cred;
790 
791 	data->args.fh     = NFS_FH(inode);
792 	data->args.offset = req_offset(req) + offset;
793 	data->args.pgbase = req->wb_pgbase + offset;
794 	data->args.pages  = data->pagevec;
795 	data->args.count  = count;
796 	data->args.context = req->wb_context;
797 
798 	data->res.fattr   = &data->fattr;
799 	data->res.count   = count;
800 	data->res.verf    = &data->verf;
801 	nfs_fattr_init(&data->fattr);
802 
803 	/* Set up the initial task struct.  */
804 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
805 	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, call_ops, data);
806 	NFS_PROTO(inode)->write_setup(data, how);
807 
808 	data->task.tk_priority = flush_task_priority(how);
809 	data->task.tk_cookie = (unsigned long)inode;
810 
811 	dprintk("NFS: %5u initiated write call "
812 		"(req %s/%Ld, %u bytes @ offset %Lu)\n",
813 		data->task.tk_pid,
814 		inode->i_sb->s_id,
815 		(long long)NFS_FILEID(inode),
816 		count,
817 		(unsigned long long)data->args.offset);
818 }
819 
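/*
 * Run the RPC task with the signal mask that the RPC client expects;
 * which signals stay deliverable depends on whether the mount is
 * interruptible.
 */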
820 static void nfs_execute_write(struct nfs_write_data *data)
821 {
822 	struct rpc_clnt *clnt = NFS_CLIENT(data->inode);
823 	sigset_t oldset;
824 
825 	rpc_clnt_sigmask(clnt, &oldset);
826 	rpc_execute(&data->task);
827 	rpc_clnt_sigunmask(clnt, &oldset);
828 }
829 
830 /*
831  * Generate multiple small requests to write out a single
832  * contiguous dirty area on one page.
833  */
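/*
 * For example, assuming 4k pages and a server wsize of 1k, a fully
 * dirty page is carved into four 1k WRITE calls, each with its own
 * nfs_write_data but all accounted against the single nfs_page via
 * wb_complete.
 */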
834 static int nfs_flush_multi(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
835 {
836 	struct nfs_page *req = nfs_list_entry(head->next);
837 	struct page *page = req->wb_page;
838 	struct nfs_write_data *data;
839 	size_t wsize = NFS_SERVER(inode)->wsize, nbytes;
840 	unsigned int offset;
841 	int requests = 0;
842 	LIST_HEAD(list);
843 
844 	nfs_list_remove_request(req);
845 
846 	nbytes = count;
847 	do {
848 		size_t len = min(nbytes, wsize);
849 
850 		data = nfs_writedata_alloc(1);
851 		if (!data)
852 			goto out_bad;
853 		list_add(&data->pages, &list);
854 		requests++;
855 		nbytes -= len;
856 	} while (nbytes != 0);
857 	atomic_set(&req->wb_complete, requests);
858 
859 	ClearPageError(page);
860 	offset = 0;
861 	nbytes = count;
862 	do {
863 		data = list_entry(list.next, struct nfs_write_data, pages);
864 		list_del_init(&data->pages);
865 
866 		data->pagevec[0] = page;
867 
868 		if (nbytes < wsize)
869 			wsize = nbytes;
870 		nfs_write_rpcsetup(req, data, &nfs_write_partial_ops,
871 				   wsize, offset, how);
872 		offset += wsize;
873 		nbytes -= wsize;
874 		nfs_execute_write(data);
875 	} while (nbytes != 0);
876 
877 	return 0;
878 
879 out_bad:
880 	while (!list_empty(&list)) {
881 		data = list_entry(list.next, struct nfs_write_data, pages);
882 		list_del(&data->pages);
883 		nfs_writedata_release(data);
884 	}
885 	nfs_redirty_request(req);
886 	nfs_end_page_writeback(req->wb_page);
887 	nfs_clear_page_writeback(req);
888 	return -ENOMEM;
889 }
890 
891 /*
892  * Create an RPC task for the given write request and kick it.
893  * The page must have been locked by the caller.
894  *
895  * It may happen that the page we're passed is not marked dirty.
896  * This is the case if nfs_updatepage detects a conflicting request
897  * that has been written but not committed.
898  */
899 static int nfs_flush_one(struct inode *inode, struct list_head *head, unsigned int npages, size_t count, int how)
900 {
901 	struct nfs_page		*req;
902 	struct page		**pages;
903 	struct nfs_write_data	*data;
904 
905 	data = nfs_writedata_alloc(npages);
906 	if (!data)
907 		goto out_bad;
908 
909 	pages = data->pagevec;
910 	while (!list_empty(head)) {
911 		req = nfs_list_entry(head->next);
912 		nfs_list_remove_request(req);
913 		nfs_list_add_request(req, &data->pages);
914 		ClearPageError(req->wb_page);
915 		*pages++ = req->wb_page;
916 	}
917 	req = nfs_list_entry(data->pages.next);
918 
919 	/* Set up the argument struct */
920 	nfs_write_rpcsetup(req, data, &nfs_write_full_ops, count, 0, how);
921 
922 	nfs_execute_write(data);
923 	return 0;
924  out_bad:
925 	while (!list_empty(head)) {
926 		struct nfs_page *req = nfs_list_entry(head->next);
927 		nfs_list_remove_request(req);
928 		nfs_redirty_request(req);
929 		nfs_end_page_writeback(req->wb_page);
930 		nfs_clear_page_writeback(req);
931 	}
932 	return -ENOMEM;
933 }
934 
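/*
 * Choose the flush strategy for this inode: if the server's wsize is
 * smaller than a page, every page must be split into several WRITE
 * calls (nfs_flush_multi); otherwise whole pages can be batched into a
 * single call (nfs_flush_one).
 */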
935 static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
936 				  struct inode *inode, int ioflags)
937 {
938 	int wsize = NFS_SERVER(inode)->wsize;
939 
940 	if (wsize < PAGE_CACHE_SIZE)
941 		nfs_pageio_init(pgio, inode, nfs_flush_multi, wsize, ioflags);
942 	else
943 		nfs_pageio_init(pgio, inode, nfs_flush_one, wsize, ioflags);
944 }
945 
946 /*
947  * Handle a write reply that flushed part of a page.
948  */
949 static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
950 {
951 	struct nfs_write_data	*data = calldata;
952 	struct nfs_page		*req = data->req;
953 	struct page		*page = req->wb_page;
954 
955 	dprintk("NFS: write (%s/%Ld %d@%Ld)",
956 		req->wb_context->dentry->d_inode->i_sb->s_id,
957 		(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
958 		req->wb_bytes,
959 		(long long)req_offset(req));
960 
961 	if (nfs_writeback_done(task, data) != 0)
962 		return;
963 
964 	if (task->tk_status < 0) {
965 		nfs_set_pageerror(page);
966 		req->wb_context->error = task->tk_status;
967 		dprintk(", error = %d\n", task->tk_status);
968 		goto out;
969 	}
970 
971 	if (nfs_write_need_commit(data)) {
972 		spinlock_t *req_lock = &NFS_I(page->mapping->host)->req_lock;
973 
974 		spin_lock(req_lock);
975 		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
976 			/* Do nothing; we need to resend the writes */
977 		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
978 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
979 			dprintk(" defer commit\n");
980 		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
981 			set_bit(PG_NEED_RESCHED, &req->wb_flags);
982 			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
983 			dprintk(" server reboot detected\n");
984 		}
985 		spin_unlock(req_lock);
986 	} else
987 		dprintk(" OK\n");
988 
989 out:
990 	if (atomic_dec_and_test(&req->wb_complete))
991 		nfs_writepage_release(req);
992 }
993 
994 static const struct rpc_call_ops nfs_write_partial_ops = {
995 	.rpc_call_done = nfs_writeback_done_partial,
996 	.rpc_release = nfs_writedata_release,
997 };
998 
999 /*
1000  * Handle a write reply that flushes a whole page.
1001  *
1002  * FIXME: There is an inherent race with invalidate_inode_pages and
1003  *	  writebacks since the page->count is kept > 1 for as long
1004  *	  as the page has a write request pending.
1005  */
1006 static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
1007 {
1008 	struct nfs_write_data	*data = calldata;
1009 	struct nfs_page		*req;
1010 	struct page		*page;
1011 
1012 	if (nfs_writeback_done(task, data) != 0)
1013 		return;
1014 
1015 	/* Update attributes as result of writeback. */
1016 	while (!list_empty(&data->pages)) {
1017 		req = nfs_list_entry(data->pages.next);
1018 		nfs_list_remove_request(req);
1019 		page = req->wb_page;
1020 
1021 		dprintk("NFS: write (%s/%Ld %d@%Ld)",
1022 			req->wb_context->dentry->d_inode->i_sb->s_id,
1023 			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1024 			req->wb_bytes,
1025 			(long long)req_offset(req));
1026 
1027 		if (task->tk_status < 0) {
1028 			nfs_set_pageerror(page);
1029 			req->wb_context->error = task->tk_status;
1030 			dprintk(", error = %d\n", task->tk_status);
1031 			goto remove_request;
1032 		}
1033 
1034 		if (nfs_write_need_commit(data)) {
1035 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1036 			nfs_mark_request_commit(req);
1037 			nfs_end_page_writeback(page);
1038 			dprintk(" marked for commit\n");
1039 			goto next;
1040 		}
1041 		dprintk(" OK\n");
1042 remove_request:
1043 		nfs_end_page_writeback(page);
1044 		nfs_inode_remove_request(req);
1045 	next:
1046 		nfs_clear_page_writeback(req);
1047 	}
1048 }
1049 
1050 static const struct rpc_call_ops nfs_write_full_ops = {
1051 	.rpc_call_done = nfs_writeback_done_full,
1052 	.rpc_release = nfs_writedata_release,
1053 };
1054 
1055 
1056 /*
1057  * This function is called when the WRITE call is complete.
1058  */
1059 int nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1060 {
1061 	struct nfs_writeargs	*argp = &data->args;
1062 	struct nfs_writeres	*resp = &data->res;
1063 	int status;
1064 
1065 	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
1066 		task->tk_pid, task->tk_status);
1067 
1068 	/*
1069 	 * ->write_done will attempt to use post-op attributes to detect
1070 	 * conflicting writes by other clients.  A strict interpretation
1071 	 * of close-to-open would allow us to continue caching even if
1072 	 * another writer had changed the file, but some applications
1073 	 * depend on tighter cache coherency when writing.
1074 	 */
1075 	status = NFS_PROTO(data->inode)->write_done(task, data);
1076 	if (status != 0)
1077 		return status;
1078 	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1079 
1080 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1081 	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
1082 		/* We tried a write call, but the server did not
1083 		 * commit data to stable storage even though we
1084 		 * requested it.
1085 		 * Note: There is a known bug in Tru64 < 5.0 in which
1086 		 *	 the server reports NFS_DATA_SYNC, but performs
1087 		 *	 NFS_FILE_SYNC. We therefore implement this checking
1088 		 *	 as a dprintk() in order to avoid filling syslog.
1089 		 */
1090 		static unsigned long    complain;
1091 
1092 		if (time_before(complain, jiffies)) {
1093 			dprintk("NFS: faulty NFS server %s:"
1094 				" (committed = %d) != (stable = %d)\n",
1095 				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
1096 				resp->verf->committed, argp->stable);
1097 			complain = jiffies + 300 * HZ;
1098 		}
1099 	}
1100 #endif
1101 	/* Is this a short write? */
1102 	if (task->tk_status >= 0 && resp->count < argp->count) {
1103 		static unsigned long    complain;
1104 
1105 		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
1106 
1107 		/* Has the server at least made some progress? */
1108 		if (resp->count != 0) {
1109 			/* Was this an NFSv2 write or an NFSv3 stable write? */
1110 			if (resp->verf->committed != NFS_UNSTABLE) {
1111 				/* Resend from where the server left off */
1112 				argp->offset += resp->count;
1113 				argp->pgbase += resp->count;
1114 				argp->count -= resp->count;
1115 			} else {
1116 				/* Resend as a stable write in order to avoid
1117 				 * headaches in the case of a server crash.
1118 				 */
1119 				argp->stable = NFS_FILE_SYNC;
1120 			}
1121 			rpc_restart_call(task);
1122 			return -EAGAIN;
1123 		}
1124 		if (time_before(complain, jiffies)) {
1125 			printk(KERN_WARNING
1126 			       "NFS: Server wrote zero bytes, expected %u.\n",
1127 					argp->count);
1128 			complain = jiffies + 300 * HZ;
1129 		}
1130 		/* Can't do anything about it except throw an error. */
1131 		task->tk_status = -EIO;
1132 	}
1133 	return 0;
1134 }
1135 
1136 
1137 #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1138 void nfs_commit_release(void *wdata)
1139 {
1140 	nfs_commit_free(wdata);
1141 }
1142 
1143 /*
1144  * Set up the argument/result storage required for the RPC call.
1145  */
1146 static void nfs_commit_rpcsetup(struct list_head *head,
1147 		struct nfs_write_data *data,
1148 		int how)
1149 {
1150 	struct nfs_page		*first;
1151 	struct inode		*inode;
1152 	int flags;
1153 
1154 	/* Set up the RPC argument and reply structs
1155 	 * NB: take care not to mess about with data->commit et al. */
1156 
1157 	list_splice_init(head, &data->pages);
1158 	first = nfs_list_entry(data->pages.next);
1159 	inode = first->wb_context->dentry->d_inode;
1160 
1161 	data->inode	  = inode;
1162 	data->cred	  = first->wb_context->cred;
1163 
1164 	data->args.fh     = NFS_FH(data->inode);
1165 	/* Note: we always request a commit of the entire inode */
1166 	data->args.offset = 0;
1167 	data->args.count  = 0;
1168 	data->res.count   = 0;
1169 	data->res.fattr   = &data->fattr;
1170 	data->res.verf    = &data->verf;
1171 	nfs_fattr_init(&data->fattr);
1172 
1173 	/* Set up the initial task struct.  */
1174 	flags = (how & FLUSH_SYNC) ? 0 : RPC_TASK_ASYNC;
1175 	rpc_init_task(&data->task, NFS_CLIENT(inode), flags, &nfs_commit_ops, data);
1176 	NFS_PROTO(inode)->commit_setup(data, how);
1177 
1178 	data->task.tk_priority = flush_task_priority(how);
1179 	data->task.tk_cookie = (unsigned long)inode;
1180 
1181 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1182 }
1183 
1184 /*
1185  * Commit dirty pages
1186  */
1187 static int
1188 nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1189 {
1190 	struct nfs_write_data	*data;
1191 	struct nfs_page         *req;
1192 
1193 	data = nfs_commit_alloc();
1194 
1195 	if (!data)
1196 		goto out_bad;
1197 
1198 	/* Set up the argument struct */
1199 	nfs_commit_rpcsetup(head, data, how);
1200 
1201 	nfs_execute_write(data);
1202 	return 0;
1203  out_bad:
1204 	while (!list_empty(head)) {
1205 		req = nfs_list_entry(head->next);
1206 		nfs_list_remove_request(req);
1207 		nfs_mark_request_commit(req);
1208 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1209 		nfs_clear_page_writeback(req);
1210 	}
1211 	return -ENOMEM;
1212 }
1213 
1214 /*
1215  * COMMIT call returned
1216  */
1217 static void nfs_commit_done(struct rpc_task *task, void *calldata)
1218 {
1219 	struct nfs_write_data	*data = calldata;
1220 	struct nfs_page		*req;
1221 
1222 	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1223 			task->tk_pid, task->tk_status);
1224 
1225 	/* Call the NFS version-specific code */
1226 	if (NFS_PROTO(data->inode)->commit_done(task, data) != 0)
1227 		return;
1228 
1229 	while (!list_empty(&data->pages)) {
1230 		req = nfs_list_entry(data->pages.next);
1231 		nfs_list_remove_request(req);
1232 		clear_bit(PG_NEED_COMMIT, &(req)->wb_flags);
1233 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1234 
1235 		dprintk("NFS: commit (%s/%Ld %d@%Ld)",
1236 			req->wb_context->dentry->d_inode->i_sb->s_id,
1237 			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1238 			req->wb_bytes,
1239 			(long long)req_offset(req));
1240 		if (task->tk_status < 0) {
1241 			req->wb_context->error = task->tk_status;
1242 			nfs_inode_remove_request(req);
1243 			dprintk(", error = %d\n", task->tk_status);
1244 			goto next;
1245 		}
1246 
1247 		/* Okay, COMMIT succeeded, apparently. Check the verifier
1248 		 * returned by the server against all stored verfs. */
1249 		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
1250 			/* We have a match */
1251 			nfs_inode_remove_request(req);
1252 			dprintk(" OK\n");
1253 			goto next;
1254 		}
1255 		/* We have a mismatch. Write the page again */
1256 		dprintk(" mismatch\n");
1257 		nfs_redirty_request(req);
1258 	next:
1259 		nfs_clear_page_writeback(req);
1260 	}
1261 }
1262 
1263 static const struct rpc_call_ops nfs_commit_ops = {
1264 	.rpc_call_done = nfs_commit_done,
1265 	.rpc_release = nfs_commit_release,
1266 };
1267 
1268 int nfs_commit_inode(struct inode *inode, int how)
1269 {
1270 	struct nfs_inode *nfsi = NFS_I(inode);
1271 	LIST_HEAD(head);
1272 	int res;
1273 
1274 	spin_lock(&nfsi->req_lock);
1275 	res = nfs_scan_commit(inode, &head, 0, 0);
1276 	spin_unlock(&nfsi->req_lock);
1277 	if (res) {
1278 		int error = nfs_commit_list(inode, &head, how);
1279 		if (error < 0)
1280 			return error;
1281 	}
1282 	return res;
1283 }
1284 #else
1285 static inline int nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1286 {
1287 	return 0;
1288 }
1289 #endif
1290 
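/*
 * Wait for all outstanding writes in the given range to complete and,
 * unless FLUSH_NOCOMMIT is set, push the resulting unstable data
 * through a COMMIT.  Returns zero on success or a negative error.
 */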
1291 long nfs_sync_mapping_wait(struct address_space *mapping, struct writeback_control *wbc, int how)
1292 {
1293 	struct inode *inode = mapping->host;
1294 	struct nfs_inode *nfsi = NFS_I(inode);
1295 	pgoff_t idx_start, idx_end;
1296 	unsigned int npages = 0;
1297 	LIST_HEAD(head);
1298 	int nocommit = how & FLUSH_NOCOMMIT;
1299 	long pages, ret;
1300 
1301 	/* FIXME */
1302 	if (wbc->range_cyclic)
1303 		idx_start = 0;
1304 	else {
1305 		idx_start = wbc->range_start >> PAGE_CACHE_SHIFT;
1306 		idx_end = wbc->range_end >> PAGE_CACHE_SHIFT;
1307 		if (idx_end > idx_start) {
1308 			pgoff_t l_npages = 1 + idx_end - idx_start;
1309 			npages = l_npages;
1310 			if (sizeof(npages) != sizeof(l_npages) &&
1311 					(pgoff_t)npages != l_npages)
1312 				npages = 0;
1313 		}
1314 	}
1315 	how &= ~FLUSH_NOCOMMIT;
1316 	spin_lock(&nfsi->req_lock);
1317 	do {
1318 		ret = nfs_wait_on_requests_locked(inode, idx_start, npages);
1319 		if (ret != 0)
1320 			continue;
1321 		if (nocommit)
1322 			break;
1323 		pages = nfs_scan_commit(inode, &head, idx_start, npages);
1324 		if (pages == 0)
1325 			break;
1326 		if (how & FLUSH_INVALIDATE) {
1327 			spin_unlock(&nfsi->req_lock);
1328 			nfs_cancel_commit_list(&head);
1329 			ret = pages;
1330 			spin_lock(&nfsi->req_lock);
1331 			continue;
1332 		}
1333 		pages += nfs_scan_commit(inode, &head, 0, 0);
1334 		spin_unlock(&nfsi->req_lock);
1335 		ret = nfs_commit_list(inode, &head, how);
1336 		spin_lock(&nfsi->req_lock);
1337 	} while (ret >= 0);
1338 	spin_unlock(&nfsi->req_lock);
1339 	return ret;
1340 }
1341 
1342 /*
1343  * flush the inode to disk.
1344  */
1345 int nfs_wb_all(struct inode *inode)
1346 {
1347 	struct address_space *mapping = inode->i_mapping;
1348 	struct writeback_control wbc = {
1349 		.bdi = mapping->backing_dev_info,
1350 		.sync_mode = WB_SYNC_ALL,
1351 		.nr_to_write = LONG_MAX,
1352 		.for_writepages = 1,
1353 		.range_cyclic = 1,
1354 	};
1355 	int ret;
1356 
1357 	ret = nfs_writepages(mapping, &wbc);
1358 	if (ret < 0)
1359 		goto out;
1360 	ret = nfs_sync_mapping_wait(mapping, &wbc, 0);
1361 	if (ret >= 0)
1362 		return 0;
1363 out:
1364 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1365 	return ret;
1366 }
1367 
1368 int nfs_sync_mapping_range(struct address_space *mapping, loff_t range_start, loff_t range_end, int how)
1369 {
1370 	struct writeback_control wbc = {
1371 		.bdi = mapping->backing_dev_info,
1372 		.sync_mode = WB_SYNC_ALL,
1373 		.nr_to_write = LONG_MAX,
1374 		.range_start = range_start,
1375 		.range_end = range_end,
1376 		.for_writepages = 1,
1377 	};
1378 	int ret;
1379 
1380 	ret = nfs_writepages(mapping, &wbc);
1381 	if (ret < 0)
1382 		goto out;
1383 	ret = nfs_sync_mapping_wait(mapping, &wbc, how);
1384 	if (ret >= 0)
1385 		return 0;
1386 out:
1387 	__mark_inode_dirty(mapping->host, I_DIRTY_PAGES);
1388 	return ret;
1389 }
1390 
1391 int nfs_wb_page_priority(struct inode *inode, struct page *page, int how)
1392 {
1393 	loff_t range_start = page_offset(page);
1394 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
1395 	struct writeback_control wbc = {
1396 		.bdi = page->mapping->backing_dev_info,
1397 		.sync_mode = WB_SYNC_ALL,
1398 		.nr_to_write = LONG_MAX,
1399 		.range_start = range_start,
1400 		.range_end = range_end,
1401 	};
1402 	int ret;
1403 
1404 	BUG_ON(!PageLocked(page));
1405 	if (clear_page_dirty_for_io(page)) {
1406 		ret = nfs_writepage_locked(page, &wbc);
1407 		if (ret < 0)
1408 			goto out;
1409 	}
1410 	if (!PagePrivate(page))
1411 		return 0;
1412 	ret = nfs_sync_mapping_wait(page->mapping, &wbc, how);
1413 	if (ret >= 0)
1414 		return 0;
1415 out:
1416 	__mark_inode_dirty(inode, I_DIRTY_PAGES);
1417 	return ret;
1418 }
1419 
1420 /*
1421  * Write back all requests on one page - we do this before reading it.
1422  */
1423 int nfs_wb_page(struct inode *inode, struct page* page)
1424 {
1425 	return nfs_wb_page_priority(inode, page, FLUSH_STABLE);
1426 }
1427 
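/*
 * a_ops->set_page_dirty entry point: if the page already carries a
 * write request, just tag that request as needing a flush rather than
 * redirtying the page.  Returns 1 if the dirty state changed.
 */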
1428 int nfs_set_page_dirty(struct page *page)
1429 {
1430 	struct address_space *mapping = page->mapping;
1431 	struct inode *inode;
1432 	spinlock_t *req_lock;
1433 	struct nfs_page *req;
1434 	int ret;
1435 
1436 	if (!mapping)
1437 		goto out_raced;
1438 	inode = mapping->host;
1439 	if (!inode)
1440 		goto out_raced;
1441 	req_lock = &NFS_I(inode)->req_lock;
1442 	spin_lock(req_lock);
1443 	req = nfs_page_find_request_locked(page);
1444 	if (req != NULL) {
1445 		/* Mark any existing write requests for flushing */
1446 		ret = !test_and_set_bit(PG_NEED_FLUSH, &req->wb_flags);
1447 		spin_unlock(req_lock);
1448 		nfs_release_request(req);
1449 		return ret;
1450 	}
1451 	ret = __set_page_dirty_nobuffers(page);
1452 	spin_unlock(req_lock);
1453 	return ret;
1454 out_raced:
1455 	return !TestSetPageDirty(page);
1456 }
1457 
1458 
1459 int __init nfs_init_writepagecache(void)
1460 {
1461 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1462 					     sizeof(struct nfs_write_data),
1463 					     0, SLAB_HWCACHE_ALIGN,
1464 					     NULL, NULL);
1465 	if (nfs_wdata_cachep == NULL)
1466 		return -ENOMEM;
1467 
1468 	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
1469 						     nfs_wdata_cachep);
1470 	if (nfs_wdata_mempool == NULL)
1471 		return -ENOMEM;
1472 
1473 	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1474 						      nfs_wdata_cachep);
1475 	if (nfs_commit_mempool == NULL)
1476 		return -ENOMEM;
1477 
1478 	/*
1479 	 * NFS congestion size, scale with available memory.
1480 	 *
1481 	 *  64MB:    8192k
1482 	 * 128MB:   11585k
1483 	 * 256MB:   16384k
1484 	 * 512MB:   23170k
1485 	 *   1GB:   32768k
1486 	 *   2GB:   46340k
1487 	 *   4GB:   65536k
1488 	 *   8GB:   92681k
1489 	 *  16GB:  131072k
1490 	 *
1491 	 * This allows larger machines to have larger/more transfers.
1492 	 * Limit the default to 256M
1493 	 */
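	/*
	 * Example, assuming 4k pages: on a 1GB machine totalram_pages
	 * is 262144, int_sqrt() of that is 512, and (16 * 512) << 2
	 * yields the 32768k figure in the table above.
	 */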
1494 	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
1495 	if (nfs_congestion_kb > 256*1024)
1496 		nfs_congestion_kb = 256*1024;
1497 
1498 	return 0;
1499 }
1500 
1501 void nfs_destroy_writepagecache(void)
1502 {
1503 	mempool_destroy(nfs_commit_mempool);
1504 	mempool_destroy(nfs_wdata_mempool);
1505 	kmem_cache_destroy(nfs_wdata_cachep);
1506 }
1507 
1508