xref: /linux/fs/nfs/write.c (revision 8ccd271f7a3a846ce6f85ead0760d9d12994a611)
11da177e4SLinus Torvalds /*
21da177e4SLinus Torvalds  * linux/fs/nfs/write.c
31da177e4SLinus Torvalds  *
47c85d900STrond Myklebust  * Write file data over NFS.
51da177e4SLinus Torvalds  *
61da177e4SLinus Torvalds  * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
71da177e4SLinus Torvalds  */
81da177e4SLinus Torvalds 
91da177e4SLinus Torvalds #include <linux/types.h>
101da177e4SLinus Torvalds #include <linux/slab.h>
111da177e4SLinus Torvalds #include <linux/mm.h>
121da177e4SLinus Torvalds #include <linux/pagemap.h>
131da177e4SLinus Torvalds #include <linux/file.h>
141da177e4SLinus Torvalds #include <linux/writeback.h>
1589a09141SPeter Zijlstra #include <linux/swap.h>
16074cc1deSTrond Myklebust #include <linux/migrate.h>
171da177e4SLinus Torvalds 
181da177e4SLinus Torvalds #include <linux/sunrpc/clnt.h>
191da177e4SLinus Torvalds #include <linux/nfs_fs.h>
201da177e4SLinus Torvalds #include <linux/nfs_mount.h>
211da177e4SLinus Torvalds #include <linux/nfs_page.h>
223fcfab16SAndrew Morton #include <linux/backing-dev.h>
23afeacc8cSPaul Gortmaker #include <linux/export.h>
243fcfab16SAndrew Morton 
251da177e4SLinus Torvalds #include <asm/uaccess.h>
261da177e4SLinus Torvalds 
271da177e4SLinus Torvalds #include "delegation.h"
2849a70f27STrond Myklebust #include "internal.h"
2991d5b470SChuck Lever #include "iostat.h"
30def6ed7eSAndy Adamson #include "nfs4_fs.h"
31074cc1deSTrond Myklebust #include "fscache.h"
3294ad1c80SFred Isaman #include "pnfs.h"
331da177e4SLinus Torvalds 
341da177e4SLinus Torvalds #define NFSDBG_FACILITY		NFSDBG_PAGECACHE
351da177e4SLinus Torvalds 
361da177e4SLinus Torvalds #define MIN_POOL_WRITE		(32)
371da177e4SLinus Torvalds #define MIN_POOL_COMMIT		(4)
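
/*
 * MIN_POOL_WRITE and MIN_POOL_COMMIT are the minimum numbers of entries
 * reserved in the write and commit mempools declared below, so that at
 * least that many requests of each kind can make forward progress even
 * when the page allocator is under memory pressure.
 */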
381da177e4SLinus Torvalds 
391da177e4SLinus Torvalds /*
401da177e4SLinus Torvalds  * Local function declarations
411da177e4SLinus Torvalds  */
42c63c7b05STrond Myklebust static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
43c63c7b05STrond Myklebust 				  struct inode *inode, int ioflags);
44f8512ad0SFred Isaman static void nfs_redirty_request(struct nfs_page *req);
45788e7a89STrond Myklebust static const struct rpc_call_ops nfs_write_partial_ops;
46788e7a89STrond Myklebust static const struct rpc_call_ops nfs_write_full_ops;
47788e7a89STrond Myklebust static const struct rpc_call_ops nfs_commit_ops;
481da177e4SLinus Torvalds 
49e18b890bSChristoph Lameter static struct kmem_cache *nfs_wdata_cachep;
503feb2d49STrond Myklebust static mempool_t *nfs_wdata_mempool;
511da177e4SLinus Torvalds static mempool_t *nfs_commit_mempool;
521da177e4SLinus Torvalds 
53c9d8f89dSTrond Myklebust struct nfs_write_data *nfs_commitdata_alloc(void)
541da177e4SLinus Torvalds {
55e6b4f8daSChristoph Lameter 	struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
5640859d7eSChuck Lever 
571da177e4SLinus Torvalds 	if (p) {
581da177e4SLinus Torvalds 		memset(p, 0, sizeof(*p));
591da177e4SLinus Torvalds 		INIT_LIST_HEAD(&p->pages);
601da177e4SLinus Torvalds 	}
611da177e4SLinus Torvalds 	return p;
621da177e4SLinus Torvalds }
63e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
641da177e4SLinus Torvalds 
655e4424afSTrond Myklebust void nfs_commit_free(struct nfs_write_data *p)
661da177e4SLinus Torvalds {
6740859d7eSChuck Lever 	if (p && (p->pagevec != &p->page_array[0]))
6840859d7eSChuck Lever 		kfree(p->pagevec);
691da177e4SLinus Torvalds 	mempool_free(p, nfs_commit_mempool);
701da177e4SLinus Torvalds }
71e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_commit_free);
721da177e4SLinus Torvalds 
738d5658c9STrond Myklebust struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount)
743feb2d49STrond Myklebust {
75e6b4f8daSChristoph Lameter 	struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
763feb2d49STrond Myklebust 
773feb2d49STrond Myklebust 	if (p) {
783feb2d49STrond Myklebust 		memset(p, 0, sizeof(*p));
793feb2d49STrond Myklebust 		INIT_LIST_HEAD(&p->pages);
80e9f7bee1STrond Myklebust 		p->npages = pagecount;
810d0b5cb3SChuck Lever 		if (pagecount <= ARRAY_SIZE(p->page_array))
820d0b5cb3SChuck Lever 			p->pagevec = p->page_array;
833feb2d49STrond Myklebust 		else {
840d0b5cb3SChuck Lever 			p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
850d0b5cb3SChuck Lever 			if (!p->pagevec) {
863feb2d49STrond Myklebust 				mempool_free(p, nfs_wdata_mempool);
873feb2d49STrond Myklebust 				p = NULL;
883feb2d49STrond Myklebust 			}
893feb2d49STrond Myklebust 		}
903feb2d49STrond Myklebust 	}
913feb2d49STrond Myklebust 	return p;
923feb2d49STrond Myklebust }
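
/*
 * Allocation strategy above: for small I/Os the page pointers fit in the
 * nfs_write_data's embedded page_array, so no second allocation is
 * needed; larger requests fall back to kcalloc().  If that fallback
 * fails, the descriptor is returned to the mempool and NULL is returned
 * to the caller.
 */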
933feb2d49STrond Myklebust 
941ae88b2eSTrond Myklebust void nfs_writedata_free(struct nfs_write_data *p)
953feb2d49STrond Myklebust {
963feb2d49STrond Myklebust 	if (p && (p->pagevec != &p->page_array[0]))
973feb2d49STrond Myklebust 		kfree(p->pagevec);
983feb2d49STrond Myklebust 	mempool_free(p, nfs_wdata_mempool);
993feb2d49STrond Myklebust }
1003feb2d49STrond Myklebust 
101dce81290STrond Myklebust void nfs_writedata_release(struct nfs_write_data *wdata)
1021da177e4SLinus Torvalds {
103383ba719STrond Myklebust 	put_nfs_open_context(wdata->args.context);
1041da177e4SLinus Torvalds 	nfs_writedata_free(wdata);
1051da177e4SLinus Torvalds }
1061da177e4SLinus Torvalds 
1077b159fc1STrond Myklebust static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
1087b159fc1STrond Myklebust {
1097b159fc1STrond Myklebust 	ctx->error = error;
1107b159fc1STrond Myklebust 	smp_wmb();
1117b159fc1STrond Myklebust 	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
1127b159fc1STrond Myklebust }
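
/*
 * The smp_wmb() above orders the store to ctx->error before setting
 * NFS_CONTEXT_ERROR_WRITE, so a reader that observes the flag (with a
 * matching read barrier) is guaranteed to see the recorded error value.
 */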
1137b159fc1STrond Myklebust 
114277459d2STrond Myklebust static struct nfs_page *nfs_page_find_request_locked(struct page *page)
115277459d2STrond Myklebust {
116277459d2STrond Myklebust 	struct nfs_page *req = NULL;
117277459d2STrond Myklebust 
118277459d2STrond Myklebust 	if (PagePrivate(page)) {
119277459d2STrond Myklebust 		req = (struct nfs_page *)page_private(page);
120277459d2STrond Myklebust 		if (req != NULL)
121c03b4024STrond Myklebust 			kref_get(&req->wb_kref);
122277459d2STrond Myklebust 	}
123277459d2STrond Myklebust 	return req;
124277459d2STrond Myklebust }
125277459d2STrond Myklebust 
126277459d2STrond Myklebust static struct nfs_page *nfs_page_find_request(struct page *page)
127277459d2STrond Myklebust {
128587142f8STrond Myklebust 	struct inode *inode = page->mapping->host;
129277459d2STrond Myklebust 	struct nfs_page *req = NULL;
130277459d2STrond Myklebust 
131587142f8STrond Myklebust 	spin_lock(&inode->i_lock);
132277459d2STrond Myklebust 	req = nfs_page_find_request_locked(page);
133587142f8STrond Myklebust 	spin_unlock(&inode->i_lock);
134277459d2STrond Myklebust 	return req;
135277459d2STrond Myklebust }
136277459d2STrond Myklebust 
1371da177e4SLinus Torvalds /* Adjust the file length if we're writing beyond the end */
1381da177e4SLinus Torvalds static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
1391da177e4SLinus Torvalds {
1401da177e4SLinus Torvalds 	struct inode *inode = page->mapping->host;
141a3d01454STrond Myklebust 	loff_t end, i_size;
142a3d01454STrond Myklebust 	pgoff_t end_index;
1431da177e4SLinus Torvalds 
144a3d01454STrond Myklebust 	spin_lock(&inode->i_lock);
145a3d01454STrond Myklebust 	i_size = i_size_read(inode);
146a3d01454STrond Myklebust 	end_index = (i_size - 1) >> PAGE_CACHE_SHIFT;
1471da177e4SLinus Torvalds 	if (i_size > 0 && page->index < end_index)
148a3d01454STrond Myklebust 		goto out;
1491da177e4SLinus Torvalds 	end = ((loff_t)page->index << PAGE_CACHE_SHIFT) + ((loff_t)offset+count);
1501da177e4SLinus Torvalds 	if (i_size >= end)
151a3d01454STrond Myklebust 		goto out;
1521da177e4SLinus Torvalds 	i_size_write(inode, end);
153a3d01454STrond Myklebust 	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
154a3d01454STrond Myklebust out:
155a3d01454STrond Myklebust 	spin_unlock(&inode->i_lock);
1561da177e4SLinus Torvalds }
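
/*
 * nfs_grow_file() only extends i_size when the write lands in or beyond
 * the current last page; the check and i_size_write() both run under
 * inode->i_lock, so concurrent extenders serialize against each other.
 */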
1571da177e4SLinus Torvalds 
158a301b777STrond Myklebust /* A writeback failed: mark the page as bad, and invalidate the page cache */
159a301b777STrond Myklebust static void nfs_set_pageerror(struct page *page)
160a301b777STrond Myklebust {
161a301b777STrond Myklebust 	SetPageError(page);
162a301b777STrond Myklebust 	nfs_zap_mapping(page->mapping->host, page->mapping);
163a301b777STrond Myklebust }
164a301b777STrond Myklebust 
1651da177e4SLinus Torvalds /* We can set the PG_uptodate flag if we see that a write request
1661da177e4SLinus Torvalds  * covers the full page.
1671da177e4SLinus Torvalds  */
1681da177e4SLinus Torvalds static void nfs_mark_uptodate(struct page *page, unsigned int base, unsigned int count)
1691da177e4SLinus Torvalds {
1701da177e4SLinus Torvalds 	if (PageUptodate(page))
1711da177e4SLinus Torvalds 		return;
1721da177e4SLinus Torvalds 	if (base != 0)
1731da177e4SLinus Torvalds 		return;
17449a70f27STrond Myklebust 	if (count != nfs_page_length(page))
1751da177e4SLinus Torvalds 		return;
1761da177e4SLinus Torvalds 	SetPageUptodate(page);
1771da177e4SLinus Torvalds }
1781da177e4SLinus Torvalds 
1791da177e4SLinus Torvalds static int wb_priority(struct writeback_control *wbc)
1801da177e4SLinus Torvalds {
1811da177e4SLinus Torvalds 	if (wbc->for_reclaim)
182c63c7b05STrond Myklebust 		return FLUSH_HIGHPRI | FLUSH_STABLE;
183b17621feSWu Fengguang 	if (wbc->for_kupdate || wbc->for_background)
184b31268acSTrond Myklebust 		return FLUSH_LOWPRI | FLUSH_COND_STABLE;
185b31268acSTrond Myklebust 	return FLUSH_COND_STABLE;
1861da177e4SLinus Torvalds }
1871da177e4SLinus Torvalds 
1881da177e4SLinus Torvalds /*
18989a09141SPeter Zijlstra  * NFS congestion control
19089a09141SPeter Zijlstra  */
19189a09141SPeter Zijlstra 
19289a09141SPeter Zijlstra int nfs_congestion_kb;
19389a09141SPeter Zijlstra 
19489a09141SPeter Zijlstra #define NFS_CONGESTION_ON_THRESH 	(nfs_congestion_kb >> (PAGE_SHIFT-10))
19589a09141SPeter Zijlstra #define NFS_CONGESTION_OFF_THRESH	\
19689a09141SPeter Zijlstra 	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
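
/*
 * Worked example for the thresholds above: nfs_congestion_kb is in
 * kilobytes and the shift by (PAGE_SHIFT - 10) converts it to pages,
 * since a page is 2^(PAGE_SHIFT - 10) kilobytes.  With 4KB pages and,
 * say, nfs_congestion_kb = 65536, the "on" threshold is 16384 pages in
 * flight and the "off" threshold is 12288 pages (on minus a quarter),
 * providing hysteresis so the congestion state does not flap.
 */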
19789a09141SPeter Zijlstra 
1985a6d41b3STrond Myklebust static int nfs_set_page_writeback(struct page *page)
19989a09141SPeter Zijlstra {
2005a6d41b3STrond Myklebust 	int ret = test_set_page_writeback(page);
2015a6d41b3STrond Myklebust 
2025a6d41b3STrond Myklebust 	if (!ret) {
20389a09141SPeter Zijlstra 		struct inode *inode = page->mapping->host;
20489a09141SPeter Zijlstra 		struct nfs_server *nfss = NFS_SERVER(inode);
20589a09141SPeter Zijlstra 
206a6305ddbSTrond Myklebust 		page_cache_get(page);
207277866a0SPeter Zijlstra 		if (atomic_long_inc_return(&nfss->writeback) >
2088aa7e847SJens Axboe 				NFS_CONGESTION_ON_THRESH) {
2098aa7e847SJens Axboe 			set_bdi_congested(&nfss->backing_dev_info,
2108aa7e847SJens Axboe 						BLK_RW_ASYNC);
2118aa7e847SJens Axboe 		}
21289a09141SPeter Zijlstra 	}
2135a6d41b3STrond Myklebust 	return ret;
21489a09141SPeter Zijlstra }
21589a09141SPeter Zijlstra 
21689a09141SPeter Zijlstra static void nfs_end_page_writeback(struct page *page)
21789a09141SPeter Zijlstra {
21889a09141SPeter Zijlstra 	struct inode *inode = page->mapping->host;
21989a09141SPeter Zijlstra 	struct nfs_server *nfss = NFS_SERVER(inode);
22089a09141SPeter Zijlstra 
22189a09141SPeter Zijlstra 	end_page_writeback(page);
222a6305ddbSTrond Myklebust 	page_cache_release(page);
223c4dc4beeSPeter Zijlstra 	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
2248aa7e847SJens Axboe 		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
22589a09141SPeter Zijlstra }
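
/*
 * nfs_set_page_writeback() and nfs_end_page_writeback() together keep
 * nfss->writeback as a count of pages under writeback, and set or clear
 * BLK_RW_ASYNC congestion on the server's backing_dev_info when that
 * count crosses the thresholds above.  Each page also holds an extra
 * page cache reference for the duration of its writeback.
 */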
22689a09141SPeter Zijlstra 
227cfb506e1STrond Myklebust static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
228e261f51fSTrond Myklebust {
229587142f8STrond Myklebust 	struct inode *inode = page->mapping->host;
230e261f51fSTrond Myklebust 	struct nfs_page *req;
231e261f51fSTrond Myklebust 	int ret;
232e261f51fSTrond Myklebust 
233587142f8STrond Myklebust 	spin_lock(&inode->i_lock);
234e261f51fSTrond Myklebust 	for (;;) {
235e261f51fSTrond Myklebust 		req = nfs_page_find_request_locked(page);
236074cc1deSTrond Myklebust 		if (req == NULL)
237074cc1deSTrond Myklebust 			break;
2389994b62bSFred Isaman 		if (nfs_lock_request_dontget(req))
239e261f51fSTrond Myklebust 			break;
240e261f51fSTrond Myklebust 		/* Note: If we hold the page lock, as is the case in nfs_writepage,
2419994b62bSFred Isaman 		 *	 then the call to nfs_lock_request_dontget() will always
242e261f51fSTrond Myklebust 		 *	 succeed provided that someone hasn't already marked the
243e261f51fSTrond Myklebust 		 *	 request as dirty (in which case we don't care).
244e261f51fSTrond Myklebust 		 */
245587142f8STrond Myklebust 		spin_unlock(&inode->i_lock);
246cfb506e1STrond Myklebust 		if (!nonblock)
247e261f51fSTrond Myklebust 			ret = nfs_wait_on_request(req);
248cfb506e1STrond Myklebust 		else
249cfb506e1STrond Myklebust 			ret = -EAGAIN;
250e261f51fSTrond Myklebust 		nfs_release_request(req);
251e261f51fSTrond Myklebust 		if (ret != 0)
252074cc1deSTrond Myklebust 			return ERR_PTR(ret);
253587142f8STrond Myklebust 		spin_lock(&inode->i_lock);
254e261f51fSTrond Myklebust 	}
255587142f8STrond Myklebust 	spin_unlock(&inode->i_lock);
256074cc1deSTrond Myklebust 	return req;
257612c9384STrond Myklebust }
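
/*
 * nfs_find_and_lock_request() returns NULL when the page has no
 * outstanding write request, a locked request on success, or an
 * ERR_PTR() on failure (for example -EAGAIN when @nonblock is set and
 * the request is currently locked by someone else).
 */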
258074cc1deSTrond Myklebust 
259074cc1deSTrond Myklebust /*
260074cc1deSTrond Myklebust  * Find an associated nfs write request, and prepare to flush it out
261074cc1deSTrond Myklebust  * May return an error if the wait in nfs_wait_on_request() was interrupted by a signal.
262074cc1deSTrond Myklebust  */
263074cc1deSTrond Myklebust static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
264cfb506e1STrond Myklebust 				struct page *page, bool nonblock)
265074cc1deSTrond Myklebust {
266074cc1deSTrond Myklebust 	struct nfs_page *req;
267074cc1deSTrond Myklebust 	int ret = 0;
268074cc1deSTrond Myklebust 
269cfb506e1STrond Myklebust 	req = nfs_find_and_lock_request(page, nonblock);
270074cc1deSTrond Myklebust 	if (!req)
271074cc1deSTrond Myklebust 		goto out;
272074cc1deSTrond Myklebust 	ret = PTR_ERR(req);
273074cc1deSTrond Myklebust 	if (IS_ERR(req))
274074cc1deSTrond Myklebust 		goto out;
275074cc1deSTrond Myklebust 
276074cc1deSTrond Myklebust 	ret = nfs_set_page_writeback(page);
277074cc1deSTrond Myklebust 	BUG_ON(ret != 0);
278074cc1deSTrond Myklebust 	BUG_ON(test_bit(PG_CLEAN, &req->wb_flags));
279074cc1deSTrond Myklebust 
280f8512ad0SFred Isaman 	if (!nfs_pageio_add_request(pgio, req)) {
281f8512ad0SFred Isaman 		nfs_redirty_request(req);
282074cc1deSTrond Myklebust 		ret = pgio->pg_error;
283f8512ad0SFred Isaman 	}
284074cc1deSTrond Myklebust out:
285074cc1deSTrond Myklebust 	return ret;
286e261f51fSTrond Myklebust }
287e261f51fSTrond Myklebust 
288f758c885STrond Myklebust static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
289f758c885STrond Myklebust {
290f758c885STrond Myklebust 	struct inode *inode = page->mapping->host;
291cfb506e1STrond Myklebust 	int ret;
292f758c885STrond Myklebust 
293f758c885STrond Myklebust 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
294f758c885STrond Myklebust 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
295f758c885STrond Myklebust 
296f758c885STrond Myklebust 	nfs_pageio_cond_complete(pgio, page->index);
2971b430beeSWu Fengguang 	ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE);
298cfb506e1STrond Myklebust 	if (ret == -EAGAIN) {
299cfb506e1STrond Myklebust 		redirty_page_for_writepage(wbc, page);
300cfb506e1STrond Myklebust 		ret = 0;
301cfb506e1STrond Myklebust 	}
302cfb506e1STrond Myklebust 	return ret;
303f758c885STrond Myklebust }
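
/*
 * When nfs_page_async_flush() returns -EAGAIN (the non-blocking case
 * above, used for WB_SYNC_NONE writeback), the page is simply put back
 * on the dirty list with redirty_page_for_writepage() and the error is
 * swallowed so the rest of the writeback pass can proceed.
 */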
304f758c885STrond Myklebust 
305e261f51fSTrond Myklebust /*
3061da177e4SLinus Torvalds  * Write an mmapped page to the server.
3071da177e4SLinus Torvalds  */
3084d770ccfSTrond Myklebust static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc)
3091da177e4SLinus Torvalds {
310f758c885STrond Myklebust 	struct nfs_pageio_descriptor pgio;
311e261f51fSTrond Myklebust 	int err;
3121da177e4SLinus Torvalds 
313f758c885STrond Myklebust 	nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc));
314f758c885STrond Myklebust 	err = nfs_do_writepage(page, wbc, &pgio);
315f758c885STrond Myklebust 	nfs_pageio_complete(&pgio);
316f758c885STrond Myklebust 	if (err < 0)
3174d770ccfSTrond Myklebust 		return err;
318f758c885STrond Myklebust 	if (pgio.pg_error < 0)
319f758c885STrond Myklebust 		return pgio.pg_error;
320f758c885STrond Myklebust 	return 0;
3214d770ccfSTrond Myklebust }
3224d770ccfSTrond Myklebust 
3234d770ccfSTrond Myklebust int nfs_writepage(struct page *page, struct writeback_control *wbc)
3244d770ccfSTrond Myklebust {
325f758c885STrond Myklebust 	int ret;
3264d770ccfSTrond Myklebust 
327f758c885STrond Myklebust 	ret = nfs_writepage_locked(page, wbc);
3281da177e4SLinus Torvalds 	unlock_page(page);
329f758c885STrond Myklebust 	return ret;
330f758c885STrond Myklebust }
331f758c885STrond Myklebust 
332f758c885STrond Myklebust static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
333f758c885STrond Myklebust {
334f758c885STrond Myklebust 	int ret;
335f758c885STrond Myklebust 
336f758c885STrond Myklebust 	ret = nfs_do_writepage(page, wbc, data);
337f758c885STrond Myklebust 	unlock_page(page);
338f758c885STrond Myklebust 	return ret;
3391da177e4SLinus Torvalds }
3401da177e4SLinus Torvalds 
3411da177e4SLinus Torvalds int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
3421da177e4SLinus Torvalds {
3431da177e4SLinus Torvalds 	struct inode *inode = mapping->host;
34472cb77f4STrond Myklebust 	unsigned long *bitlock = &NFS_I(inode)->flags;
345c63c7b05STrond Myklebust 	struct nfs_pageio_descriptor pgio;
3461da177e4SLinus Torvalds 	int err;
3471da177e4SLinus Torvalds 
34872cb77f4STrond Myklebust 	/* Stop dirtying of new pages while we sync */
34972cb77f4STrond Myklebust 	err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING,
35072cb77f4STrond Myklebust 			nfs_wait_bit_killable, TASK_KILLABLE);
35172cb77f4STrond Myklebust 	if (err)
35272cb77f4STrond Myklebust 		goto out_err;
35372cb77f4STrond Myklebust 
35491d5b470SChuck Lever 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
35591d5b470SChuck Lever 
356c63c7b05STrond Myklebust 	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc));
357f758c885STrond Myklebust 	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
358c63c7b05STrond Myklebust 	nfs_pageio_complete(&pgio);
35972cb77f4STrond Myklebust 
36072cb77f4STrond Myklebust 	clear_bit_unlock(NFS_INO_FLUSHING, bitlock);
36172cb77f4STrond Myklebust 	smp_mb__after_clear_bit();
36272cb77f4STrond Myklebust 	wake_up_bit(bitlock, NFS_INO_FLUSHING);
36372cb77f4STrond Myklebust 
364f758c885STrond Myklebust 	if (err < 0)
36572cb77f4STrond Myklebust 		goto out_err;
36672cb77f4STrond Myklebust 	err = pgio.pg_error;
36772cb77f4STrond Myklebust 	if (err < 0)
36872cb77f4STrond Myklebust 		goto out_err;
369c63c7b05STrond Myklebust 	return 0;
37072cb77f4STrond Myklebust out_err:
37172cb77f4STrond Myklebust 	return err;
3721da177e4SLinus Torvalds }
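
/*
 * The NFS_INO_FLUSHING bit taken above serializes nfs_writepages()
 * callers on the same inode: only one flusher runs at a time, and
 * waiters sleep killably in wait_on_bit_lock() until the bit is cleared
 * and they are woken via wake_up_bit().
 */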
3731da177e4SLinus Torvalds 
3741da177e4SLinus Torvalds /*
3751da177e4SLinus Torvalds  * Insert a write request into an inode
3761da177e4SLinus Torvalds  */
377d6d6dc7cSFred Isaman static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
3781da177e4SLinus Torvalds {
3791da177e4SLinus Torvalds 	struct nfs_inode *nfsi = NFS_I(inode);
380e7d39069STrond Myklebust 
381e7d39069STrond Myklebust 	/* Lock the request! */
382e7d39069STrond Myklebust 	nfs_lock_request_dontget(req);
383e7d39069STrond Myklebust 
384e7d39069STrond Myklebust 	spin_lock(&inode->i_lock);
3854d65c520STrond Myklebust 	if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
386a9a4a87aSTrond Myklebust 		inode->i_version++;
3872df485a7STrond Myklebust 	set_bit(PG_MAPPED, &req->wb_flags);
388deb7d638STrond Myklebust 	SetPagePrivate(req->wb_page);
389277459d2STrond Myklebust 	set_page_private(req->wb_page, (unsigned long)req);
3901da177e4SLinus Torvalds 	nfsi->npages++;
391c03b4024STrond Myklebust 	kref_get(&req->wb_kref);
392e7d39069STrond Myklebust 	spin_unlock(&inode->i_lock);
3931da177e4SLinus Torvalds }
3941da177e4SLinus Torvalds 
3951da177e4SLinus Torvalds /*
39689a09141SPeter Zijlstra  * Remove a write request from an inode
3971da177e4SLinus Torvalds  */
3981da177e4SLinus Torvalds static void nfs_inode_remove_request(struct nfs_page *req)
3991da177e4SLinus Torvalds {
4003d4ff43dSAl Viro 	struct inode *inode = req->wb_context->dentry->d_inode;
4011da177e4SLinus Torvalds 	struct nfs_inode *nfsi = NFS_I(inode);
4021da177e4SLinus Torvalds 
4031da177e4SLinus Torvalds 	BUG_ON(!NFS_WBACK_BUSY(req));
4041da177e4SLinus Torvalds 
405587142f8STrond Myklebust 	spin_lock(&inode->i_lock);
406277459d2STrond Myklebust 	set_page_private(req->wb_page, 0);
407deb7d638STrond Myklebust 	ClearPagePrivate(req->wb_page);
4082df485a7STrond Myklebust 	clear_bit(PG_MAPPED, &req->wb_flags);
4091da177e4SLinus Torvalds 	nfsi->npages--;
410587142f8STrond Myklebust 	spin_unlock(&inode->i_lock);
4111da177e4SLinus Torvalds 	nfs_release_request(req);
4121da177e4SLinus Torvalds }
4131da177e4SLinus Torvalds 
41461822ab5STrond Myklebust static void
4156d884e8fSFred nfs_mark_request_dirty(struct nfs_page *req)
41661822ab5STrond Myklebust {
41761822ab5STrond Myklebust 	__set_page_dirty_nobuffers(req->wb_page);
41861822ab5STrond Myklebust }
41961822ab5STrond Myklebust 
4201da177e4SLinus Torvalds #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
4218dd37758STrond Myklebust /**
4228dd37758STrond Myklebust  * nfs_request_add_commit_list - add request to a commit list
4238dd37758STrond Myklebust  * @req: pointer to a struct nfs_page
4248dd37758STrond Myklebust  * @head: commit list head
4258dd37758STrond Myklebust  *
4268dd37758STrond Myklebust  * This sets the PG_CLEAN bit, and updates the inode's global count of
4278dd37758STrond Myklebust  * outstanding requests requiring a commit, as well as the MM page
4288dd37758STrond Myklebust  * stats.
4298dd37758STrond Myklebust  *
4308dd37758STrond Myklebust  * The caller must _not_ hold the inode->i_lock, but must be
4318dd37758STrond Myklebust  * holding the nfs_page lock.
4328dd37758STrond Myklebust  */
4338dd37758STrond Myklebust void
4348dd37758STrond Myklebust nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head)
4358dd37758STrond Myklebust {
4368dd37758STrond Myklebust 	struct inode *inode = req->wb_context->dentry->d_inode;
4378dd37758STrond Myklebust 
4388dd37758STrond Myklebust 	set_bit(PG_CLEAN, &(req)->wb_flags);
4398dd37758STrond Myklebust 	spin_lock(&inode->i_lock);
4408dd37758STrond Myklebust 	nfs_list_add_request(req, head);
4418dd37758STrond Myklebust 	NFS_I(inode)->ncommit++;
4428dd37758STrond Myklebust 	spin_unlock(&inode->i_lock);
4438dd37758STrond Myklebust 	inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
4448dd37758STrond Myklebust 	inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE);
4458dd37758STrond Myklebust 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
4468dd37758STrond Myklebust }
4478dd37758STrond Myklebust EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
4488dd37758STrond Myklebust 
4498dd37758STrond Myklebust /**
4508dd37758STrond Myklebust  * nfs_request_remove_commit_list - Remove request from a commit list
4518dd37758STrond Myklebust  * @req: pointer to a nfs_page
4528dd37758STrond Myklebust  *
4538dd37758STrond Myklebust  * This clears the PG_CLEAN bit, and updates the inode's global count of
4548dd37758STrond Myklebust  * outstanding requests requiring a commit.
4558dd37758STrond Myklebust  * It does not update the MM page stats.
4568dd37758STrond Myklebust  *
4578dd37758STrond Myklebust  * The caller _must_ hold the inode->i_lock and the nfs_page lock.
4588dd37758STrond Myklebust  */
4598dd37758STrond Myklebust void
4608dd37758STrond Myklebust nfs_request_remove_commit_list(struct nfs_page *req)
4618dd37758STrond Myklebust {
4628dd37758STrond Myklebust 	struct inode *inode = req->wb_context->dentry->d_inode;
4638dd37758STrond Myklebust 
4648dd37758STrond Myklebust 	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
4658dd37758STrond Myklebust 		return;
4668dd37758STrond Myklebust 	nfs_list_remove_request(req);
4678dd37758STrond Myklebust 	NFS_I(inode)->ncommit--;
4688dd37758STrond Myklebust }
4698dd37758STrond Myklebust EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
4708dd37758STrond Myklebust 
4718dd37758STrond Myklebust 
4721da177e4SLinus Torvalds /*
4731da177e4SLinus Torvalds  * Add a request to the inode's commit list.
4741da177e4SLinus Torvalds  */
4751da177e4SLinus Torvalds static void
476a861a1e1SFred Isaman nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
4771da177e4SLinus Torvalds {
4783d4ff43dSAl Viro 	struct inode *inode = req->wb_context->dentry->d_inode;
4791da177e4SLinus Torvalds 
4808dd37758STrond Myklebust 	if (pnfs_mark_request_commit(req, lseg))
4818dd37758STrond Myklebust 		return;
4828dd37758STrond Myklebust 	nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list);
4831da177e4SLinus Torvalds }
4848e821cadSTrond Myklebust 
485d6d6dc7cSFred Isaman static void
486d6d6dc7cSFred Isaman nfs_clear_page_commit(struct page *page)
487e468bae9STrond Myklebust {
488e468bae9STrond Myklebust 	dec_zone_page_state(page, NR_UNSTABLE_NFS);
489e468bae9STrond Myklebust 	dec_bdi_stat(page->mapping->backing_dev_info, BDI_RECLAIMABLE);
490e468bae9STrond Myklebust }
491d6d6dc7cSFred Isaman 
4928dd37758STrond Myklebust static void
493d6d6dc7cSFred Isaman nfs_clear_request_commit(struct nfs_page *req)
494d6d6dc7cSFred Isaman {
4958dd37758STrond Myklebust 	if (test_bit(PG_CLEAN, &req->wb_flags)) {
4968dd37758STrond Myklebust 		struct inode *inode = req->wb_context->dentry->d_inode;
497d6d6dc7cSFred Isaman 
4988dd37758STrond Myklebust 		if (!pnfs_clear_request_commit(req)) {
4998dd37758STrond Myklebust 			spin_lock(&inode->i_lock);
5008dd37758STrond Myklebust 			nfs_request_remove_commit_list(req);
5018dd37758STrond Myklebust 			spin_unlock(&inode->i_lock);
502d6d6dc7cSFred Isaman 		}
5038dd37758STrond Myklebust 		nfs_clear_page_commit(req->wb_page);
5048dd37758STrond Myklebust 	}
505e468bae9STrond Myklebust }
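
/*
 * nfs_clear_request_commit() undoes nfs_mark_request_commit(): if
 * PG_CLEAN is set, the request is removed from the pNFS or inode commit
 * list (taking i_lock only in the non-pNFS case) and the page's
 * NR_UNSTABLE_NFS / BDI_RECLAIMABLE accounting is dropped.
 */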
506e468bae9STrond Myklebust 
5078e821cadSTrond Myklebust static inline
5088e821cadSTrond Myklebust int nfs_write_need_commit(struct nfs_write_data *data)
5098e821cadSTrond Myklebust {
510465d5243SFred Isaman 	if (data->verf.committed == NFS_DATA_SYNC)
511465d5243SFred Isaman 		return data->lseg == NULL;
512465d5243SFred Isaman 	else
5138e821cadSTrond Myklebust 		return data->verf.committed != NFS_FILE_SYNC;
5148e821cadSTrond Myklebust }
5158e821cadSTrond Myklebust 
5168e821cadSTrond Myklebust static inline
517a861a1e1SFred Isaman int nfs_reschedule_unstable_write(struct nfs_page *req,
518a861a1e1SFred Isaman 				  struct nfs_write_data *data)
5198e821cadSTrond Myklebust {
520e468bae9STrond Myklebust 	if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
521a861a1e1SFred Isaman 		nfs_mark_request_commit(req, data->lseg);
5228e821cadSTrond Myklebust 		return 1;
5238e821cadSTrond Myklebust 	}
5248e821cadSTrond Myklebust 	if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
5256d884e8fSFred 		nfs_mark_request_dirty(req);
5268e821cadSTrond Myklebust 		return 1;
5278e821cadSTrond Myklebust 	}
5288e821cadSTrond Myklebust 	return 0;
5298e821cadSTrond Myklebust }
5308e821cadSTrond Myklebust #else
5318dd37758STrond Myklebust static void
532a861a1e1SFred Isaman nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg)
5338e821cadSTrond Myklebust {
5348e821cadSTrond Myklebust }
5358e821cadSTrond Myklebust 
5368dd37758STrond Myklebust static void
537e468bae9STrond Myklebust nfs_clear_request_commit(struct nfs_page *req)
538e468bae9STrond Myklebust {
539e468bae9STrond Myklebust }
540e468bae9STrond Myklebust 
5418e821cadSTrond Myklebust static inline
5428e821cadSTrond Myklebust int nfs_write_need_commit(struct nfs_write_data *data)
5438e821cadSTrond Myklebust {
5448e821cadSTrond Myklebust 	return 0;
5458e821cadSTrond Myklebust }
5468e821cadSTrond Myklebust 
5478e821cadSTrond Myklebust static inline
548a861a1e1SFred Isaman int nfs_reschedule_unstable_write(struct nfs_page *req,
549a861a1e1SFred Isaman 				  struct nfs_write_data *data)
5508e821cadSTrond Myklebust {
5518e821cadSTrond Myklebust 	return 0;
5528e821cadSTrond Myklebust }
5531da177e4SLinus Torvalds #endif
5541da177e4SLinus Torvalds 
55547c62564STrond Myklebust #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
556fb8a1f11STrond Myklebust static int
557fb8a1f11STrond Myklebust nfs_need_commit(struct nfs_inode *nfsi)
558fb8a1f11STrond Myklebust {
559d6d6dc7cSFred Isaman 	return nfsi->ncommit > 0;
560fb8a1f11STrond Myklebust }
561fb8a1f11STrond Myklebust 
562d6d6dc7cSFred Isaman /* i_lock held by caller */
5638dd37758STrond Myklebust static int
5643b3be88dSTrond Myklebust nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
5653b3be88dSTrond Myklebust 		spinlock_t *lock)
566d6d6dc7cSFred Isaman {
567d6d6dc7cSFred Isaman 	struct nfs_page *req, *tmp;
568d6d6dc7cSFred Isaman 	int ret = 0;
569d6d6dc7cSFred Isaman 
570d6d6dc7cSFred Isaman 	list_for_each_entry_safe(req, tmp, src, wb_list) {
5718dd37758STrond Myklebust 		if (!nfs_lock_request(req))
5728dd37758STrond Myklebust 			continue;
5733b3be88dSTrond Myklebust 		if (cond_resched_lock(lock))
5743b3be88dSTrond Myklebust 			list_safe_reset_next(req, tmp, wb_list);
5758dd37758STrond Myklebust 		nfs_request_remove_commit_list(req);
5768dd37758STrond Myklebust 		nfs_list_add_request(req, dst);
577d6d6dc7cSFred Isaman 		ret++;
578d6d6dc7cSFred Isaman 		if (ret == max)
579d6d6dc7cSFred Isaman 			break;
580d6d6dc7cSFred Isaman 	}
581d6d6dc7cSFred Isaman 	return ret;
582d6d6dc7cSFred Isaman }
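
/*
 * nfs_scan_commit_list() may drop and retake the lock named by @lock via
 * cond_resched_lock(), which is why the list cursor is revalidated with
 * list_safe_reset_next() before the locked request is moved from @src to
 * @dst.  At most @max requests are transferred; the return value is the
 * number actually moved.
 */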
583d6d6dc7cSFred Isaman 
5841da177e4SLinus Torvalds /*
5851da177e4SLinus Torvalds  * nfs_scan_commit - Scan an inode for commit requests
5861da177e4SLinus Torvalds  * @inode: NFS inode to scan
5871da177e4SLinus Torvalds  * @dst: destination list
5881da177e4SLinus Torvalds  *
5891da177e4SLinus Torvalds  * Moves requests from the inode's 'commit' request list.
5901da177e4SLinus Torvalds  * The requests are *not* checked to ensure that they form a contiguous set.
5911da177e4SLinus Torvalds  */
5921da177e4SLinus Torvalds static int
593d6d6dc7cSFred Isaman nfs_scan_commit(struct inode *inode, struct list_head *dst)
5941da177e4SLinus Torvalds {
5951da177e4SLinus Torvalds 	struct nfs_inode *nfsi = NFS_I(inode);
596d6d6dc7cSFred Isaman 	int ret = 0;
597fb8a1f11STrond Myklebust 
5980d88f6e8SDave Chinner 	spin_lock(&inode->i_lock);
599d6d6dc7cSFred Isaman 	if (nfsi->ncommit > 0) {
6008dd37758STrond Myklebust 		const int max = INT_MAX;
601d6d6dc7cSFred Isaman 
6023b3be88dSTrond Myklebust 		ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max,
6033b3be88dSTrond Myklebust 				&inode->i_lock);
6043b3be88dSTrond Myklebust 		ret += pnfs_scan_commit_lists(inode, max - ret,
6053b3be88dSTrond Myklebust 				&inode->i_lock);
606d6d6dc7cSFred Isaman 	}
6070d88f6e8SDave Chinner 	spin_unlock(&inode->i_lock);
608ff778d02STrond Myklebust 	return ret;
6091da177e4SLinus Torvalds }
610d6d6dc7cSFred Isaman 
611c42de9ddSTrond Myklebust #else
612fb8a1f11STrond Myklebust static inline int nfs_need_commit(struct nfs_inode *nfsi)
613fb8a1f11STrond Myklebust {
614fb8a1f11STrond Myklebust 	return 0;
615fb8a1f11STrond Myklebust }
616fb8a1f11STrond Myklebust 
617d6d6dc7cSFred Isaman static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst)
618c42de9ddSTrond Myklebust {
619c42de9ddSTrond Myklebust 	return 0;
620c42de9ddSTrond Myklebust }
6211da177e4SLinus Torvalds #endif
6221da177e4SLinus Torvalds 
6231da177e4SLinus Torvalds /*
624e7d39069STrond Myklebust  * Search for an existing write request, and attempt to update
625e7d39069STrond Myklebust  * it to reflect a new dirty region on a given page.
6261da177e4SLinus Torvalds  *
627e7d39069STrond Myklebust  * If the attempt fails, then the existing request is flushed out
628e7d39069STrond Myklebust  * to disk.
6291da177e4SLinus Torvalds  */
630e7d39069STrond Myklebust static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
631e7d39069STrond Myklebust 		struct page *page,
632e7d39069STrond Myklebust 		unsigned int offset,
633e7d39069STrond Myklebust 		unsigned int bytes)
6341da177e4SLinus Torvalds {
635e7d39069STrond Myklebust 	struct nfs_page *req;
636e7d39069STrond Myklebust 	unsigned int rqend;
637e7d39069STrond Myklebust 	unsigned int end;
6381da177e4SLinus Torvalds 	int error;
639277459d2STrond Myklebust 
640e7d39069STrond Myklebust 	if (!PagePrivate(page))
641e7d39069STrond Myklebust 		return NULL;
642e7d39069STrond Myklebust 
643e7d39069STrond Myklebust 	end = offset + bytes;
644e7d39069STrond Myklebust 	spin_lock(&inode->i_lock);
645e7d39069STrond Myklebust 
646e7d39069STrond Myklebust 	for (;;) {
647e7d39069STrond Myklebust 		req = nfs_page_find_request_locked(page);
648e7d39069STrond Myklebust 		if (req == NULL)
649e7d39069STrond Myklebust 			goto out_unlock;
650e7d39069STrond Myklebust 
651e7d39069STrond Myklebust 		rqend = req->wb_offset + req->wb_bytes;
652e7d39069STrond Myklebust 		/*
653e7d39069STrond Myklebust 		 * Tell the caller to flush out the request if
654e7d39069STrond Myklebust 		 * the offsets are non-contiguous.
655e7d39069STrond Myklebust 		 * Note: nfs_flush_incompatible() will already
656e7d39069STrond Myklebust 		 * have flushed out requests having wrong owners.
657e7d39069STrond Myklebust 		 */
658e468bae9STrond Myklebust 		if (offset > rqend
659e7d39069STrond Myklebust 		    || end < req->wb_offset)
660e7d39069STrond Myklebust 			goto out_flushme;
661e7d39069STrond Myklebust 
6629994b62bSFred Isaman 		if (nfs_lock_request_dontget(req))
663e7d39069STrond Myklebust 			break;
664e7d39069STrond Myklebust 
665e7d39069STrond Myklebust 		/* The request is locked, so wait and then retry */
666587142f8STrond Myklebust 		spin_unlock(&inode->i_lock);
6671da177e4SLinus Torvalds 		error = nfs_wait_on_request(req);
6681da177e4SLinus Torvalds 		nfs_release_request(req);
669e7d39069STrond Myklebust 		if (error != 0)
670e7d39069STrond Myklebust 			goto out_err;
671e7d39069STrond Myklebust 		spin_lock(&inode->i_lock);
6721da177e4SLinus Torvalds 	}
6731da177e4SLinus Torvalds 
6741da177e4SLinus Torvalds 	/* Okay, the request matches. Update the region */
6751da177e4SLinus Torvalds 	if (offset < req->wb_offset) {
6761da177e4SLinus Torvalds 		req->wb_offset = offset;
6771da177e4SLinus Torvalds 		req->wb_pgbase = offset;
6781da177e4SLinus Torvalds 	}
6791da177e4SLinus Torvalds 	if (end > rqend)
6801da177e4SLinus Torvalds 		req->wb_bytes = end - req->wb_offset;
681e7d39069STrond Myklebust 	else
682e7d39069STrond Myklebust 		req->wb_bytes = rqend - req->wb_offset;
683e7d39069STrond Myklebust out_unlock:
684e7d39069STrond Myklebust 	spin_unlock(&inode->i_lock);
685ca138f36SFred Isaman 	if (req)
6868dd37758STrond Myklebust 		nfs_clear_request_commit(req);
687e7d39069STrond Myklebust 	return req;
688e7d39069STrond Myklebust out_flushme:
689e7d39069STrond Myklebust 	spin_unlock(&inode->i_lock);
690e7d39069STrond Myklebust 	nfs_release_request(req);
691e7d39069STrond Myklebust 	error = nfs_wb_page(inode, page);
692e7d39069STrond Myklebust out_err:
693e7d39069STrond Myklebust 	return ERR_PTR(error);
694e7d39069STrond Myklebust }
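
/*
 * Return convention for nfs_try_to_update_request(): a locked request is
 * returned when the new dirty region could be merged into an existing
 * one; NULL means there is no usable request (either the page had none,
 * or the existing one was non-contiguous and has been flushed out via
 * nfs_wb_page()); an ERR_PTR() is returned when waiting on a busy
 * request or the flush itself failed.
 */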
6951da177e4SLinus Torvalds 
696e7d39069STrond Myklebust /*
697e7d39069STrond Myklebust  * Try to update an existing write request, or create one if there is none.
698e7d39069STrond Myklebust  *
699e7d39069STrond Myklebust  * Note: Should always be called with the Page Lock held to prevent races
700e7d39069STrond Myklebust  * if we have to add a new request. Also assumes that the caller has
701e7d39069STrond Myklebust  * already called nfs_flush_incompatible() if necessary.
702e7d39069STrond Myklebust  */
703e7d39069STrond Myklebust static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
704e7d39069STrond Myklebust 		struct page *page, unsigned int offset, unsigned int bytes)
705e7d39069STrond Myklebust {
706e7d39069STrond Myklebust 	struct inode *inode = page->mapping->host;
707e7d39069STrond Myklebust 	struct nfs_page	*req;
708e7d39069STrond Myklebust 
709e7d39069STrond Myklebust 	req = nfs_try_to_update_request(inode, page, offset, bytes);
710e7d39069STrond Myklebust 	if (req != NULL)
711e7d39069STrond Myklebust 		goto out;
712e7d39069STrond Myklebust 	req = nfs_create_request(ctx, inode, page, offset, bytes);
713e7d39069STrond Myklebust 	if (IS_ERR(req))
714e7d39069STrond Myklebust 		goto out;
715d6d6dc7cSFred Isaman 	nfs_inode_add_request(inode, req);
716efc91ed0STrond Myklebust out:
71761e930a9STrond Myklebust 	return req;
7181da177e4SLinus Torvalds }
7191da177e4SLinus Torvalds 
720e7d39069STrond Myklebust static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
721e7d39069STrond Myklebust 		unsigned int offset, unsigned int count)
722e7d39069STrond Myklebust {
723e7d39069STrond Myklebust 	struct nfs_page	*req;
724e7d39069STrond Myklebust 
725e7d39069STrond Myklebust 	req = nfs_setup_write_request(ctx, page, offset, count);
726e7d39069STrond Myklebust 	if (IS_ERR(req))
727e7d39069STrond Myklebust 		return PTR_ERR(req);
728e7d39069STrond Myklebust 	/* Update file length */
729e7d39069STrond Myklebust 	nfs_grow_file(page, offset, count);
730e7d39069STrond Myklebust 	nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
731a6305ddbSTrond Myklebust 	nfs_mark_request_dirty(req);
7329994b62bSFred Isaman 	nfs_unlock_request(req);
733e7d39069STrond Myklebust 	return 0;
734e7d39069STrond Myklebust }
735e7d39069STrond Myklebust 
7361da177e4SLinus Torvalds int nfs_flush_incompatible(struct file *file, struct page *page)
7371da177e4SLinus Torvalds {
738cd3758e3STrond Myklebust 	struct nfs_open_context *ctx = nfs_file_open_context(file);
7391da177e4SLinus Torvalds 	struct nfs_page	*req;
7401a54533eSTrond Myklebust 	int do_flush, status;
7411da177e4SLinus Torvalds 	/*
7421da177e4SLinus Torvalds 	 * Look for a request corresponding to this page. If there
7431da177e4SLinus Torvalds 	 * is one, and it belongs to another file, we flush it out
7441da177e4SLinus Torvalds 	 * before we try to copy anything into the page. Do this
7451da177e4SLinus Torvalds 	 * due to the lack of an ACCESS-type call in NFSv2.
7461da177e4SLinus Torvalds 	 * Also do the same if we find a request from an existing
7471da177e4SLinus Torvalds 	 * dropped page.
7481da177e4SLinus Torvalds 	 */
7491a54533eSTrond Myklebust 	do {
750277459d2STrond Myklebust 		req = nfs_page_find_request(page);
7511a54533eSTrond Myklebust 		if (req == NULL)
7521a54533eSTrond Myklebust 			return 0;
753f11ac8dbSTrond Myklebust 		do_flush = req->wb_page != page || req->wb_context != ctx ||
754f11ac8dbSTrond Myklebust 			req->wb_lock_context->lockowner != current->files ||
755f11ac8dbSTrond Myklebust 			req->wb_lock_context->pid != current->tgid;
7561da177e4SLinus Torvalds 		nfs_release_request(req);
7571a54533eSTrond Myklebust 		if (!do_flush)
7581a54533eSTrond Myklebust 			return 0;
759277459d2STrond Myklebust 		status = nfs_wb_page(page->mapping->host, page);
7601a54533eSTrond Myklebust 	} while (status == 0);
7611a54533eSTrond Myklebust 	return status;
7621da177e4SLinus Torvalds }
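
/*
 * The loop in nfs_flush_incompatible() re-checks the page after every
 * nfs_wb_page() call, because a new (and possibly still incompatible)
 * request may have been attached to the page while it was being flushed;
 * it returns 0 only once no conflicting request remains.
 */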
7631da177e4SLinus Torvalds 
7641da177e4SLinus Torvalds /*
7655d47a356STrond Myklebust  * If the page cache is marked as unsafe or invalid, then we can't rely on
7665d47a356STrond Myklebust  * the PageUptodate() flag. In this case, we will need to turn off
7675d47a356STrond Myklebust  * write optimisations that depend on the page contents being correct.
7685d47a356STrond Myklebust  */
7695d47a356STrond Myklebust static int nfs_write_pageuptodate(struct page *page, struct inode *inode)
7705d47a356STrond Myklebust {
7715d47a356STrond Myklebust 	return PageUptodate(page) &&
7725d47a356STrond Myklebust 		!(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA));
7735d47a356STrond Myklebust }
7745d47a356STrond Myklebust 
7755d47a356STrond Myklebust /*
7761da177e4SLinus Torvalds  * Update and possibly write a cached page of an NFS file.
7771da177e4SLinus Torvalds  *
7781da177e4SLinus Torvalds  * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
7791da177e4SLinus Torvalds  * things with a page scheduled for an RPC call (e.g. invalidate it).
7801da177e4SLinus Torvalds  */
7811da177e4SLinus Torvalds int nfs_updatepage(struct file *file, struct page *page,
7821da177e4SLinus Torvalds 		unsigned int offset, unsigned int count)
7831da177e4SLinus Torvalds {
784cd3758e3STrond Myklebust 	struct nfs_open_context *ctx = nfs_file_open_context(file);
7851da177e4SLinus Torvalds 	struct inode	*inode = page->mapping->host;
7861da177e4SLinus Torvalds 	int		status = 0;
7871da177e4SLinus Torvalds 
78891d5b470SChuck Lever 	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);
78991d5b470SChuck Lever 
79048186c7dSChuck Lever 	dprintk("NFS:       nfs_updatepage(%s/%s %d@%lld)\n",
79101cce933SJosef "Jeff" Sipek 		file->f_path.dentry->d_parent->d_name.name,
79201cce933SJosef "Jeff" Sipek 		file->f_path.dentry->d_name.name, count,
7930bbacc40SChuck Lever 		(long long)(page_offset(page) + offset));
7941da177e4SLinus Torvalds 
7951da177e4SLinus Torvalds 	/* If we're not using byte range locks, and we know the page
7965d47a356STrond Myklebust 	 * is up to date, it may be more efficient to extend the write
7975d47a356STrond Myklebust 	 * to cover the entire page in order to avoid fragmentation
7985d47a356STrond Myklebust 	 * inefficiencies.
7991da177e4SLinus Torvalds 	 */
8005d47a356STrond Myklebust 	if (nfs_write_pageuptodate(page, inode) &&
8015d47a356STrond Myklebust 			inode->i_flock == NULL &&
8026b2f3d1fSChristoph Hellwig 			!(file->f_flags & O_DSYNC)) {
80349a70f27STrond Myklebust 		count = max(count + offset, nfs_page_length(page));
8041da177e4SLinus Torvalds 		offset = 0;
8051da177e4SLinus Torvalds 	}
8061da177e4SLinus Torvalds 
807e21195a7STrond Myklebust 	status = nfs_writepage_setup(ctx, page, offset, count);
80803fa9e84STrond Myklebust 	if (status < 0)
80903fa9e84STrond Myklebust 		nfs_set_pageerror(page);
81059b7c05fSTrond Myklebust 	else
81159b7c05fSTrond Myklebust 		__set_page_dirty_nobuffers(page);
8121da177e4SLinus Torvalds 
81348186c7dSChuck Lever 	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
8141da177e4SLinus Torvalds 			status, (long long)i_size_read(inode));
8151da177e4SLinus Torvalds 	return status;
8161da177e4SLinus Torvalds }
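
/*
 * Note on the write-extension heuristic above: when the page is known to
 * be up to date, no file locks are held (inode->i_flock == NULL) and the
 * file is not open O_DSYNC, the write is widened to start at offset 0
 * and cover at least the valid part of the cached page, so the server
 * sees fewer, larger WRITE requests instead of many small fragments.
 */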
8171da177e4SLinus Torvalds 
818a861a1e1SFred Isaman static void nfs_writepage_release(struct nfs_page *req,
819a861a1e1SFred Isaman 				  struct nfs_write_data *data)
8201da177e4SLinus Torvalds {
821a6305ddbSTrond Myklebust 	struct page *page = req->wb_page;
8228e821cadSTrond Myklebust 
823a861a1e1SFred Isaman 	if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
8241da177e4SLinus Torvalds 		nfs_inode_remove_request(req);
8259994b62bSFred Isaman 	nfs_unlock_request(req);
826a6305ddbSTrond Myklebust 	nfs_end_page_writeback(page);
8271da177e4SLinus Torvalds }
8281da177e4SLinus Torvalds 
8293ff7576dSTrond Myklebust static int flush_task_priority(int how)
8301da177e4SLinus Torvalds {
8311da177e4SLinus Torvalds 	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
8321da177e4SLinus Torvalds 		case FLUSH_HIGHPRI:
8331da177e4SLinus Torvalds 			return RPC_PRIORITY_HIGH;
8341da177e4SLinus Torvalds 		case FLUSH_LOWPRI:
8351da177e4SLinus Torvalds 			return RPC_PRIORITY_LOW;
8361da177e4SLinus Torvalds 	}
8371da177e4SLinus Torvalds 	return RPC_PRIORITY_NORMAL;
8381da177e4SLinus Torvalds }
8391da177e4SLinus Torvalds 
840a69aef14SFred Isaman int nfs_initiate_write(struct nfs_write_data *data,
841d138d5d1SAndy Adamson 		       struct rpc_clnt *clnt,
842788e7a89STrond Myklebust 		       const struct rpc_call_ops *call_ops,
8431da177e4SLinus Torvalds 		       int how)
8441da177e4SLinus Torvalds {
845d138d5d1SAndy Adamson 	struct inode *inode = data->inode;
8463ff7576dSTrond Myklebust 	int priority = flush_task_priority(how);
84707737691STrond Myklebust 	struct rpc_task *task;
848bdc7f021STrond Myklebust 	struct rpc_message msg = {
849bdc7f021STrond Myklebust 		.rpc_argp = &data->args,
850bdc7f021STrond Myklebust 		.rpc_resp = &data->res,
851d138d5d1SAndy Adamson 		.rpc_cred = data->cred,
852bdc7f021STrond Myklebust 	};
85384115e1cSTrond Myklebust 	struct rpc_task_setup task_setup_data = {
854d138d5d1SAndy Adamson 		.rpc_client = clnt,
85507737691STrond Myklebust 		.task = &data->task,
856bdc7f021STrond Myklebust 		.rpc_message = &msg,
85784115e1cSTrond Myklebust 		.callback_ops = call_ops,
85884115e1cSTrond Myklebust 		.callback_data = data,
859101070caSTrond Myklebust 		.workqueue = nfsiod_workqueue,
8602c61be0aSTrond Myklebust 		.flags = RPC_TASK_ASYNC,
8613ff7576dSTrond Myklebust 		.priority = priority,
86284115e1cSTrond Myklebust 	};
8632c61be0aSTrond Myklebust 	int ret = 0;
8641da177e4SLinus Torvalds 
865d138d5d1SAndy Adamson 	/* Set up the initial task struct.  */
866d138d5d1SAndy Adamson 	NFS_PROTO(inode)->write_setup(data, &msg);
867d138d5d1SAndy Adamson 
868d138d5d1SAndy Adamson 	dprintk("NFS: %5u initiated write call "
869d138d5d1SAndy Adamson 		"(req %s/%lld, %u bytes @ offset %llu)\n",
870d138d5d1SAndy Adamson 		data->task.tk_pid,
871d138d5d1SAndy Adamson 		inode->i_sb->s_id,
872d138d5d1SAndy Adamson 		(long long)NFS_FILEID(inode),
873d138d5d1SAndy Adamson 		data->args.count,
874d138d5d1SAndy Adamson 		(unsigned long long)data->args.offset);
875d138d5d1SAndy Adamson 
876d138d5d1SAndy Adamson 	task = rpc_run_task(&task_setup_data);
877d138d5d1SAndy Adamson 	if (IS_ERR(task)) {
878d138d5d1SAndy Adamson 		ret = PTR_ERR(task);
879d138d5d1SAndy Adamson 		goto out;
880d138d5d1SAndy Adamson 	}
881d138d5d1SAndy Adamson 	if (how & FLUSH_SYNC) {
882d138d5d1SAndy Adamson 		ret = rpc_wait_for_completion_task(task);
883d138d5d1SAndy Adamson 		if (ret == 0)
884d138d5d1SAndy Adamson 			ret = task->tk_status;
885d138d5d1SAndy Adamson 	}
886d138d5d1SAndy Adamson 	rpc_put_task(task);
887d138d5d1SAndy Adamson out:
888d138d5d1SAndy Adamson 	return ret;
889d138d5d1SAndy Adamson }
890a69aef14SFred Isaman EXPORT_SYMBOL_GPL(nfs_initiate_write);
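
/*
 * nfs_initiate_write() always issues the RPC asynchronously via
 * rpc_run_task(); when FLUSH_SYNC is requested it then blocks in
 * rpc_wait_for_completion_task() and propagates the task's tk_status, so
 * synchronous callers still go through the same asynchronous machinery.
 */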
891d138d5d1SAndy Adamson 
892d138d5d1SAndy Adamson /*
893d138d5d1SAndy Adamson  * Set up the argument/result storage required for the RPC call.
894d138d5d1SAndy Adamson  */
8956e4efd56STrond Myklebust static void nfs_write_rpcsetup(struct nfs_page *req,
896d138d5d1SAndy Adamson 		struct nfs_write_data *data,
897d138d5d1SAndy Adamson 		unsigned int count, unsigned int offset,
898d138d5d1SAndy Adamson 		int how)
899d138d5d1SAndy Adamson {
9003d4ff43dSAl Viro 	struct inode *inode = req->wb_context->dentry->d_inode;
901d138d5d1SAndy Adamson 
9021da177e4SLinus Torvalds 	/* Set up the RPC argument and reply structs
9031da177e4SLinus Torvalds 	 * NB: take care not to mess about with data->commit et al. */
9041da177e4SLinus Torvalds 
9051da177e4SLinus Torvalds 	data->req = req;
9063d4ff43dSAl Viro 	data->inode = inode = req->wb_context->dentry->d_inode;
907d138d5d1SAndy Adamson 	data->cred = req->wb_context->cred;
9081da177e4SLinus Torvalds 
9091da177e4SLinus Torvalds 	data->args.fh     = NFS_FH(inode);
9101da177e4SLinus Torvalds 	data->args.offset = req_offset(req) + offset;
9112bea038cSBoaz Harrosh 	/* pnfs_set_layoutcommit needs this */
9122bea038cSBoaz Harrosh 	data->mds_offset = data->args.offset;
9131da177e4SLinus Torvalds 	data->args.pgbase = req->wb_pgbase + offset;
9141da177e4SLinus Torvalds 	data->args.pages  = data->pagevec;
9151da177e4SLinus Torvalds 	data->args.count  = count;
916383ba719STrond Myklebust 	data->args.context = get_nfs_open_context(req->wb_context);
917f11ac8dbSTrond Myklebust 	data->args.lock_context = req->wb_lock_context;
918bdc7f021STrond Myklebust 	data->args.stable  = NFS_UNSTABLE;
91987ed5eb4STrond Myklebust 	switch (how & (FLUSH_STABLE | FLUSH_COND_STABLE)) {
92087ed5eb4STrond Myklebust 	case 0:
92187ed5eb4STrond Myklebust 		break;
92287ed5eb4STrond Myklebust 	case FLUSH_COND_STABLE:
92387ed5eb4STrond Myklebust 		if (nfs_need_commit(NFS_I(inode)))
92487ed5eb4STrond Myklebust 			break;
92587ed5eb4STrond Myklebust 	default:
926bdc7f021STrond Myklebust 		data->args.stable = NFS_FILE_SYNC;
927bdc7f021STrond Myklebust 	}
9281da177e4SLinus Torvalds 
9291da177e4SLinus Torvalds 	data->res.fattr   = &data->fattr;
9301da177e4SLinus Torvalds 	data->res.count   = count;
9311da177e4SLinus Torvalds 	data->res.verf    = &data->verf;
9320e574af1STrond Myklebust 	nfs_fattr_init(&data->fattr);
9336e4efd56STrond Myklebust }
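
/*
 * Stable-flag selection above: writes default to NFS_UNSTABLE.  With
 * FLUSH_COND_STABLE the write is kept unstable only while the inode
 * already has requests awaiting a COMMIT (one will be sent anyway);
 * otherwise, and for FLUSH_STABLE, the write is sent as NFS_FILE_SYNC.
 */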
9341da177e4SLinus Torvalds 
9356e4efd56STrond Myklebust static int nfs_do_write(struct nfs_write_data *data,
9366e4efd56STrond Myklebust 		const struct rpc_call_ops *call_ops,
9376e4efd56STrond Myklebust 		int how)
9386e4efd56STrond Myklebust {
9395f00bcb3SStephen Rothwell 	struct inode *inode = data->args.context->dentry->d_inode;
9400382b744SAndy Adamson 
941d138d5d1SAndy Adamson 	return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how);
9421da177e4SLinus Torvalds }
9431da177e4SLinus Torvalds 
944275acaafSTrond Myklebust static int nfs_do_multiple_writes(struct list_head *head,
945275acaafSTrond Myklebust 		const struct rpc_call_ops *call_ops,
946275acaafSTrond Myklebust 		int how)
947275acaafSTrond Myklebust {
948275acaafSTrond Myklebust 	struct nfs_write_data *data;
949275acaafSTrond Myklebust 	int ret = 0;
950275acaafSTrond Myklebust 
951275acaafSTrond Myklebust 	while (!list_empty(head)) {
952275acaafSTrond Myklebust 		int ret2;
953275acaafSTrond Myklebust 
954275acaafSTrond Myklebust 		data = list_entry(head->next, struct nfs_write_data, list);
955275acaafSTrond Myklebust 		list_del_init(&data->list);
956275acaafSTrond Myklebust 
957dce81290STrond Myklebust 		ret2 = nfs_do_write(data, call_ops, how);
958275acaafSTrond Myklebust 		if (ret == 0)
959275acaafSTrond Myklebust 			ret = ret2;
960275acaafSTrond Myklebust 	}
961275acaafSTrond Myklebust 	return ret;
962275acaafSTrond Myklebust }
963275acaafSTrond Myklebust 
9646d884e8fSFred /* If an nfs_flush_* function fails, it should remove reqs from @head and
9656d884e8fSFred  * call this on each, which will prepare them to be retried on the next
9666d884e8fSFred  * writeback using the standard NFS write path.
9676d884e8fSFred  */
9686d884e8fSFred static void nfs_redirty_request(struct nfs_page *req)
9696d884e8fSFred {
970a6305ddbSTrond Myklebust 	struct page *page = req->wb_page;
971a6305ddbSTrond Myklebust 
9726d884e8fSFred 	nfs_mark_request_dirty(req);
9739994b62bSFred Isaman 	nfs_unlock_request(req);
974a6305ddbSTrond Myklebust 	nfs_end_page_writeback(page);
9756d884e8fSFred }
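
/*
 * nfs_redirty_request() undoes what nfs_page_async_flush() set up: the
 * page goes back on the dirty list, the request lock is dropped, and the
 * writeback state (plus the extra page reference) is released so a later
 * writeback pass can pick the page up again.
 */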
9766d884e8fSFred 
9771da177e4SLinus Torvalds /*
9781da177e4SLinus Torvalds  * Generate multiple small requests to write out a single
9791da177e4SLinus Torvalds  * contiguous dirty area on one page.
9801da177e4SLinus Torvalds  */
981275acaafSTrond Myklebust static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res)
9821da177e4SLinus Torvalds {
983c76069bdSFred Isaman 	struct nfs_page *req = nfs_list_entry(desc->pg_list.next);
9841da177e4SLinus Torvalds 	struct page *page = req->wb_page;
9851da177e4SLinus Torvalds 	struct nfs_write_data *data;
986d097971dSTrond Myklebust 	size_t wsize = desc->pg_bsize, nbytes;
987e9f7bee1STrond Myklebust 	unsigned int offset;
9881da177e4SLinus Torvalds 	int requests = 0;
989dbae4c73STrond Myklebust 	int ret = 0;
9901da177e4SLinus Torvalds 
9911da177e4SLinus Torvalds 	nfs_list_remove_request(req);
9921da177e4SLinus Torvalds 
993b31268acSTrond Myklebust 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
994b31268acSTrond Myklebust 	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit ||
995b31268acSTrond Myklebust 	     desc->pg_count > wsize))
996b31268acSTrond Myklebust 		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
997b31268acSTrond Myklebust 
998b31268acSTrond Myklebust 
999275acaafSTrond Myklebust 	offset = 0;
1000c76069bdSFred Isaman 	nbytes = desc->pg_count;
1001e9f7bee1STrond Myklebust 	do {
1002e9f7bee1STrond Myklebust 		size_t len = min(nbytes, wsize);
1003e9f7bee1STrond Myklebust 
10048d5658c9STrond Myklebust 		data = nfs_writedata_alloc(1);
10051da177e4SLinus Torvalds 		if (!data)
10061da177e4SLinus Torvalds 			goto out_bad;
1007275acaafSTrond Myklebust 		data->pagevec[0] = page;
1008f13c3620STrond Myklebust 		nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags);
1009275acaafSTrond Myklebust 		list_add(&data->list, res);
10101da177e4SLinus Torvalds 		requests++;
1011e9f7bee1STrond Myklebust 		nbytes -= len;
1012275acaafSTrond Myklebust 		offset += len;
1013e9f7bee1STrond Myklebust 	} while (nbytes != 0);
10141da177e4SLinus Torvalds 	atomic_set(&req->wb_complete, requests);
101550828d7eSTrond Myklebust 	desc->pg_rpc_callops = &nfs_write_partial_ops;
1016dbae4c73STrond Myklebust 	return ret;
10171da177e4SLinus Torvalds 
10181da177e4SLinus Torvalds out_bad:
1019275acaafSTrond Myklebust 	while (!list_empty(res)) {
1020275acaafSTrond Myklebust 		data = list_entry(res->next, struct nfs_write_data, list);
10216e4efd56STrond Myklebust 		list_del(&data->list);
1022*8ccd271fSFred Isaman 		nfs_writedata_release(data);
10231da177e4SLinus Torvalds 	}
102461822ab5STrond Myklebust 	nfs_redirty_request(req);
10251da177e4SLinus Torvalds 	return -ENOMEM;
10261da177e4SLinus Torvalds }
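
/*
 * nfs_flush_multi() splits one page's dirty range into wsize-sized
 * chunks, allocating an nfs_write_data per chunk that all point at the
 * same page; req->wb_complete counts the outstanding chunks so the
 * request is only completed once every partial write has finished, via
 * nfs_write_partial_ops.
 */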
10271da177e4SLinus Torvalds 
10281da177e4SLinus Torvalds /*
10291da177e4SLinus Torvalds  * Create an RPC task for the given write request and kick it.
10301da177e4SLinus Torvalds  * The page must have been locked by the caller.
10311da177e4SLinus Torvalds  *
10321da177e4SLinus Torvalds  * It may happen that the page we're passed is not marked dirty.
10331da177e4SLinus Torvalds  * This is the case if nfs_updatepage detects a conflicting request
10341da177e4SLinus Torvalds  * that has been written but not committed.
10351da177e4SLinus Torvalds  */
1036275acaafSTrond Myklebust static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res)
10371da177e4SLinus Torvalds {
10381da177e4SLinus Torvalds 	struct nfs_page		*req;
10391da177e4SLinus Torvalds 	struct page		**pages;
10401da177e4SLinus Torvalds 	struct nfs_write_data	*data;
1041c76069bdSFred Isaman 	struct list_head *head = &desc->pg_list;
10423b609184SPeng Tao 	int ret = 0;
10431da177e4SLinus Torvalds 
1044c76069bdSFred Isaman 	data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base,
1045c76069bdSFred Isaman 						      desc->pg_count));
104644b83799SFred Isaman 	if (!data) {
104744b83799SFred Isaman 		while (!list_empty(head)) {
104844b83799SFred Isaman 			req = nfs_list_entry(head->next);
104944b83799SFred Isaman 			nfs_list_remove_request(req);
105044b83799SFred Isaman 			nfs_redirty_request(req);
105144b83799SFred Isaman 		}
105244b83799SFred Isaman 		ret = -ENOMEM;
105344b83799SFred Isaman 		goto out;
105444b83799SFred Isaman 	}
10551da177e4SLinus Torvalds 	pages = data->pagevec;
10561da177e4SLinus Torvalds 	while (!list_empty(head)) {
10571da177e4SLinus Torvalds 		req = nfs_list_entry(head->next);
10581da177e4SLinus Torvalds 		nfs_list_remove_request(req);
10591da177e4SLinus Torvalds 		nfs_list_add_request(req, &data->pages);
10601da177e4SLinus Torvalds 		*pages++ = req->wb_page;
10611da177e4SLinus Torvalds 	}
10621da177e4SLinus Torvalds 	req = nfs_list_entry(data->pages.next);
10631da177e4SLinus Torvalds 
1064b31268acSTrond Myklebust 	if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1065b31268acSTrond Myklebust 	    (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit))
1066b31268acSTrond Myklebust 		desc->pg_ioflags &= ~FLUSH_COND_STABLE;
1067b31268acSTrond Myklebust 
10681da177e4SLinus Torvalds 	/* Set up the argument struct */
10696e4efd56STrond Myklebust 	nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags);
1070275acaafSTrond Myklebust 	list_add(&data->list, res);
107150828d7eSTrond Myklebust 	desc->pg_rpc_callops = &nfs_write_full_ops;
107244b83799SFred Isaman out:
107344b83799SFred Isaman 	return ret;
10741da177e4SLinus Torvalds }
10751da177e4SLinus Torvalds 
1076dce81290STrond Myklebust int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head)
1077dce81290STrond Myklebust {
1078dce81290STrond Myklebust 	if (desc->pg_bsize < PAGE_CACHE_SIZE)
1079dce81290STrond Myklebust 		return nfs_flush_multi(desc, head);
1080dce81290STrond Myklebust 	return nfs_flush_one(desc, head);
1081dce81290STrond Myklebust }
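
/*
 * nfs_generic_flush() picks the I/O strategy: when the descriptor's
 * block size (pg_bsize, normally the server wsize) is smaller than a
 * page cache page, the per-page splitting path nfs_flush_multi() is
 * used; otherwise nfs_flush_one() coalesces the whole request list into
 * a single RPC.
 */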
1082dce81290STrond Myklebust 
1083dce81290STrond Myklebust static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
10841751c363STrond Myklebust {
1085275acaafSTrond Myklebust 	LIST_HEAD(head);
1086275acaafSTrond Myklebust 	int ret;
1087275acaafSTrond Myklebust 
1088dce81290STrond Myklebust 	ret = nfs_generic_flush(desc, &head);
1089275acaafSTrond Myklebust 	if (ret == 0)
109050828d7eSTrond Myklebust 		ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops,
1091dce81290STrond Myklebust 				desc->pg_ioflags);
1092275acaafSTrond Myklebust 	return ret;
10931751c363STrond Myklebust }
10941751c363STrond Myklebust 
10951751c363STrond Myklebust static const struct nfs_pageio_ops nfs_pageio_write_ops = {
10961751c363STrond Myklebust 	.pg_test = nfs_generic_pg_test,
10971751c363STrond Myklebust 	.pg_doio = nfs_generic_pg_writepages,
10981751c363STrond Myklebust };
10991751c363STrond Myklebust 
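/*
 * Initialize a pageio descriptor for writing directly to the MDS, using the
 * generic write ops and the server's negotiated wsize.
 */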
1100e2fecb21STrond Myklebust void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
11011751c363STrond Myklebust 				  struct inode *inode, int ioflags)
11021751c363STrond Myklebust {
11031751c363STrond Myklebust 	nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops,
11041751c363STrond Myklebust 				NFS_SERVER(inode)->wsize, ioflags);
11051751c363STrond Myklebust }
11061751c363STrond Myklebust 
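/*
 * Reset an existing pageio descriptor to the plain MDS write path (generic
 * ops, server wsize); presumably used when pNFS I/O must fall back to the MDS.
 */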
1107dce81290STrond Myklebust void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1108dce81290STrond Myklebust {
1109dce81290STrond Myklebust 	pgio->pg_ops = &nfs_pageio_write_ops;
1110dce81290STrond Myklebust 	pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
1111dce81290STrond Myklebust }
11121f945357STrond Myklebust EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1113dce81290STrond Myklebust 
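/*
 * Set up writes through pNFS when a layout driver accepts them, otherwise
 * fall back to the MDS path above.
 */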
1114c63c7b05STrond Myklebust static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1115c63c7b05STrond Myklebust 				  struct inode *inode, int ioflags)
11161da177e4SLinus Torvalds {
11171751c363STrond Myklebust 	if (!pnfs_pageio_init_write(pgio, inode, ioflags))
11181751c363STrond Myklebust 		nfs_pageio_init_write_mds(pgio, inode, ioflags);
11191da177e4SLinus Torvalds }
11201da177e4SLinus Torvalds 
11211da177e4SLinus Torvalds /*
11221da177e4SLinus Torvalds  * Handle a write reply that flushed part of a page.
11231da177e4SLinus Torvalds  */
1124788e7a89STrond Myklebust static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
11251da177e4SLinus Torvalds {
1126788e7a89STrond Myklebust 	struct nfs_write_data	*data = calldata;
11271da177e4SLinus Torvalds 
112848186c7dSChuck Lever 	dprintk("NFS: %5u write(%s/%lld %d@%lld)",
112948186c7dSChuck Lever 		task->tk_pid,
11303d4ff43dSAl Viro 		data->req->wb_context->dentry->d_inode->i_sb->s_id,
113148186c7dSChuck Lever 		(long long)
11323d4ff43dSAl Viro 		  NFS_FILEID(data->req->wb_context->dentry->d_inode),
113348186c7dSChuck Lever 		data->req->wb_bytes, (long long)req_offset(data->req));
11341da177e4SLinus Torvalds 
1135c9d8f89dSTrond Myklebust 	nfs_writeback_done(task, data);
1136c9d8f89dSTrond Myklebust }
1137788e7a89STrond Myklebust 
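/*
 * Per-request completion for a partial-page write: record any error, decide
 * whether the data still needs a COMMIT (or a resend after an apparent server
 * reboot), and release the request once the last partial write for the page
 * has completed.
 */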
1138c9d8f89dSTrond Myklebust static void nfs_writeback_release_partial(void *calldata)
1139c9d8f89dSTrond Myklebust {
1140c9d8f89dSTrond Myklebust 	struct nfs_write_data	*data = calldata;
1141c9d8f89dSTrond Myklebust 	struct nfs_page		*req = data->req;
1142c9d8f89dSTrond Myklebust 	struct page		*page = req->wb_page;
1143c9d8f89dSTrond Myklebust 	int status = data->task.tk_status;
1144c9d8f89dSTrond Myklebust 
1145c9d8f89dSTrond Myklebust 	if (status < 0) {
1146a301b777STrond Myklebust 		nfs_set_pageerror(page);
1147c9d8f89dSTrond Myklebust 		nfs_context_set_write_error(req->wb_context, status);
1148c9d8f89dSTrond Myklebust 		dprintk(", error = %d\n", status);
11498e821cadSTrond Myklebust 		goto out;
11508e821cadSTrond Myklebust 	}
11518e821cadSTrond Myklebust 
11528e821cadSTrond Myklebust 	if (nfs_write_need_commit(data)) {
1153587142f8STrond Myklebust 		struct inode *inode = page->mapping->host;
11548e821cadSTrond Myklebust 
1155587142f8STrond Myklebust 		spin_lock(&inode->i_lock);
11568e821cadSTrond Myklebust 		if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
11578e821cadSTrond Myklebust 			/* Do nothing: we need to resend the writes */
11588e821cadSTrond Myklebust 		} else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
11591da177e4SLinus Torvalds 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
11601da177e4SLinus Torvalds 			dprintk(" defer commit\n");
11611da177e4SLinus Torvalds 		} else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
11628e821cadSTrond Myklebust 			set_bit(PG_NEED_RESCHED, &req->wb_flags);
11638e821cadSTrond Myklebust 			clear_bit(PG_NEED_COMMIT, &req->wb_flags);
11641da177e4SLinus Torvalds 			dprintk(" server reboot detected\n");
11651da177e4SLinus Torvalds 		}
1166587142f8STrond Myklebust 		spin_unlock(&inode->i_lock);
11671da177e4SLinus Torvalds 	} else
11681da177e4SLinus Torvalds 		dprintk(" OK\n");
11691da177e4SLinus Torvalds 
11708e821cadSTrond Myklebust out:
11711da177e4SLinus Torvalds 	if (atomic_dec_and_test(&req->wb_complete))
1172a861a1e1SFred Isaman 		nfs_writepage_release(req, data);
1173c9d8f89dSTrond Myklebust 	nfs_writedata_release(calldata);
11741da177e4SLinus Torvalds }
11751da177e4SLinus Torvalds 
1176def6ed7eSAndy Adamson void nfs_write_prepare(struct rpc_task *task, void *calldata)
1177def6ed7eSAndy Adamson {
1178def6ed7eSAndy Adamson 	struct nfs_write_data *data = calldata;
1179c6cb80d0SBryan Schumaker 	NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
1180def6ed7eSAndy Adamson }
1181def6ed7eSAndy Adamson 
1182788e7a89STrond Myklebust static const struct rpc_call_ops nfs_write_partial_ops = {
1183def6ed7eSAndy Adamson 	.rpc_call_prepare = nfs_write_prepare,
1184788e7a89STrond Myklebust 	.rpc_call_done = nfs_writeback_done_partial,
1185c9d8f89dSTrond Myklebust 	.rpc_release = nfs_writeback_release_partial,
1186788e7a89STrond Myklebust };
1187788e7a89STrond Myklebust 
11881da177e4SLinus Torvalds /*
11891da177e4SLinus Torvalds  * Handle a write reply that flushes a whole page.
11901da177e4SLinus Torvalds  *
11911da177e4SLinus Torvalds  * FIXME: There is an inherent race with invalidate_inode_pages and
11921da177e4SLinus Torvalds  *	  writebacks since the page->count is kept > 1 for as long
11931da177e4SLinus Torvalds  *	  as the page has a write request pending.
11941da177e4SLinus Torvalds  */
1195788e7a89STrond Myklebust static void nfs_writeback_done_full(struct rpc_task *task, void *calldata)
11961da177e4SLinus Torvalds {
1197788e7a89STrond Myklebust 	struct nfs_write_data	*data = calldata;
11981da177e4SLinus Torvalds 
1199c9d8f89dSTrond Myklebust 	nfs_writeback_done(task, data);
1200c9d8f89dSTrond Myklebust }
1201c9d8f89dSTrond Myklebust 
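/*
 * Completion for whole-page writes: walk the request list, record any error,
 * mark requests that still need a COMMIT, and otherwise remove them from the
 * inode before ending page writeback.
 */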
1202c9d8f89dSTrond Myklebust static void nfs_writeback_release_full(void *calldata)
1203c9d8f89dSTrond Myklebust {
1204c9d8f89dSTrond Myklebust 	struct nfs_write_data	*data = calldata;
1205e2fecb21STrond Myklebust 	int status = data->task.tk_status;
1206788e7a89STrond Myklebust 
12071da177e4SLinus Torvalds 	/* Update attributes as result of writeback. */
12081da177e4SLinus Torvalds 	while (!list_empty(&data->pages)) {
1209c9d8f89dSTrond Myklebust 		struct nfs_page *req = nfs_list_entry(data->pages.next);
1210c9d8f89dSTrond Myklebust 		struct page *page = req->wb_page;
1211c9d8f89dSTrond Myklebust 
12121da177e4SLinus Torvalds 		nfs_list_remove_request(req);
12131da177e4SLinus Torvalds 
121448186c7dSChuck Lever 		dprintk("NFS: %5u write (%s/%lld %d@%lld)",
121548186c7dSChuck Lever 			data->task.tk_pid,
12163d4ff43dSAl Viro 			req->wb_context->dentry->d_inode->i_sb->s_id,
12173d4ff43dSAl Viro 			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
12181da177e4SLinus Torvalds 			req->wb_bytes,
12191da177e4SLinus Torvalds 			(long long)req_offset(req));
12201da177e4SLinus Torvalds 
1221c9d8f89dSTrond Myklebust 		if (status < 0) {
1222a301b777STrond Myklebust 			nfs_set_pageerror(page);
1223c9d8f89dSTrond Myklebust 			nfs_context_set_write_error(req->wb_context, status);
1224c9d8f89dSTrond Myklebust 			dprintk(", error = %d\n", status);
12258e821cadSTrond Myklebust 			goto remove_request;
12261da177e4SLinus Torvalds 		}
12271da177e4SLinus Torvalds 
12288e821cadSTrond Myklebust 		if (nfs_write_need_commit(data)) {
12291da177e4SLinus Torvalds 			memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1230a861a1e1SFred Isaman 			nfs_mark_request_commit(req, data->lseg);
12311da177e4SLinus Torvalds 			dprintk(" marked for commit\n");
12328e821cadSTrond Myklebust 			goto next;
12338e821cadSTrond Myklebust 		}
12348e821cadSTrond Myklebust 		dprintk(" OK\n");
12358e821cadSTrond Myklebust remove_request:
12361da177e4SLinus Torvalds 		nfs_inode_remove_request(req);
12371da177e4SLinus Torvalds 	next:
12389994b62bSFred Isaman 		nfs_unlock_request(req);
1239a6305ddbSTrond Myklebust 		nfs_end_page_writeback(page);
12401da177e4SLinus Torvalds 	}
1241c9d8f89dSTrond Myklebust 	nfs_writedata_release(calldata);
12421da177e4SLinus Torvalds }
12431da177e4SLinus Torvalds 
1244788e7a89STrond Myklebust static const struct rpc_call_ops nfs_write_full_ops = {
1245def6ed7eSAndy Adamson 	.rpc_call_prepare = nfs_write_prepare,
1246788e7a89STrond Myklebust 	.rpc_call_done = nfs_writeback_done_full,
1247c9d8f89dSTrond Myklebust 	.rpc_release = nfs_writeback_release_full,
1248788e7a89STrond Myklebust };
1249788e7a89STrond Myklebust 
1250788e7a89STrond Myklebust 
12511da177e4SLinus Torvalds /*
12521da177e4SLinus Torvalds  * This function is called when the WRITE call is complete.
12531da177e4SLinus Torvalds  */
125413602896SFred Isaman void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
12551da177e4SLinus Torvalds {
12561da177e4SLinus Torvalds 	struct nfs_writeargs	*argp = &data->args;
12571da177e4SLinus Torvalds 	struct nfs_writeres	*resp = &data->res;
1258788e7a89STrond Myklebust 	int status;
12591da177e4SLinus Torvalds 
1260a3f565b1SChuck Lever 	dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
12611da177e4SLinus Torvalds 		task->tk_pid, task->tk_status);
12621da177e4SLinus Torvalds 
1263f551e44fSChuck Lever 	/*
1264f551e44fSChuck Lever 	 * ->write_done will attempt to use post-op attributes to detect
1265f551e44fSChuck Lever 	 * conflicting writes by other clients.  A strict interpretation
1266f551e44fSChuck Lever 	 * of close-to-open would allow us to continue caching even if
1267f551e44fSChuck Lever 	 * another writer had changed the file, but some applications
1268f551e44fSChuck Lever 	 * depend on tighter cache coherency when writing.
1269f551e44fSChuck Lever 	 */
1270788e7a89STrond Myklebust 	status = NFS_PROTO(data->inode)->write_done(task, data);
1271788e7a89STrond Myklebust 	if (status != 0)
127213602896SFred Isaman 		return;
127391d5b470SChuck Lever 	nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
127491d5b470SChuck Lever 
12751da177e4SLinus Torvalds #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
12761da177e4SLinus Torvalds 	if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
12771da177e4SLinus Torvalds 		/* We tried a write call, but the server did not
12781da177e4SLinus Torvalds 		 * commit data to stable storage even though we
12791da177e4SLinus Torvalds 		 * requested it.
12801da177e4SLinus Torvalds 		 * Note: There is a known bug in Tru64 < 5.0 in which
12811da177e4SLinus Torvalds 		 *	 the server reports NFS_DATA_SYNC, but performs
12821da177e4SLinus Torvalds 		 *	 NFS_FILE_SYNC. We therefore implement this checking
12831da177e4SLinus Torvalds 		 *	 as a dprintk() in order to avoid filling syslog.
12841da177e4SLinus Torvalds 		 */
12851da177e4SLinus Torvalds 		static unsigned long    complain;
12861da177e4SLinus Torvalds 
1287a69aef14SFred Isaman 		/* Note: this will print the MDS for a DS write */
12881da177e4SLinus Torvalds 		if (time_before(complain, jiffies)) {
12891da177e4SLinus Torvalds 			dprintk("NFS:       faulty NFS server %s:"
12901da177e4SLinus Torvalds 				" (committed = %d) != (stable = %d)\n",
12912b72c9ccSRakib Mullick 				NFS_SERVER(data->inode)->nfs_client->cl_hostname,
12921da177e4SLinus Torvalds 				resp->verf->committed, argp->stable);
12931da177e4SLinus Torvalds 			complain = jiffies + 300 * HZ;
12941da177e4SLinus Torvalds 		}
12951da177e4SLinus Torvalds 	}
12961da177e4SLinus Torvalds #endif
12971da177e4SLinus Torvalds 	/* Is this a short write? */
12981da177e4SLinus Torvalds 	if (task->tk_status >= 0 && resp->count < argp->count) {
12991da177e4SLinus Torvalds 		static unsigned long    complain;
13001da177e4SLinus Torvalds 
130191d5b470SChuck Lever 		nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE);
130291d5b470SChuck Lever 
13031da177e4SLinus Torvalds 		/* Has the server at least made some progress? */
13041da177e4SLinus Torvalds 		if (resp->count != 0) {
13051da177e4SLinus Torvalds 			/* Was this an NFSv2 write or an NFSv3 stable write? */
13061da177e4SLinus Torvalds 			if (resp->verf->committed != NFS_UNSTABLE) {
13071da177e4SLinus Torvalds 				/* Resend from where the server left off */
1308a69aef14SFred Isaman 				data->mds_offset += resp->count;
13091da177e4SLinus Torvalds 				argp->offset += resp->count;
13101da177e4SLinus Torvalds 				argp->pgbase += resp->count;
13111da177e4SLinus Torvalds 				argp->count -= resp->count;
13121da177e4SLinus Torvalds 			} else {
13131da177e4SLinus Torvalds 				/* Resend as a stable write in order to avoid
13141da177e4SLinus Torvalds 				 * headaches in the case of a server crash.
13151da177e4SLinus Torvalds 				 */
13161da177e4SLinus Torvalds 				argp->stable = NFS_FILE_SYNC;
13171da177e4SLinus Torvalds 			}
1318d00c5d43STrond Myklebust 			rpc_restart_call_prepare(task);
131913602896SFred Isaman 			return;
13201da177e4SLinus Torvalds 		}
13211da177e4SLinus Torvalds 		if (time_before(complain, jiffies)) {
13221da177e4SLinus Torvalds 			printk(KERN_WARNING
13231da177e4SLinus Torvalds 			       "NFS: Server wrote zero bytes, expected %u.\n",
13241da177e4SLinus Torvalds 					argp->count);
13251da177e4SLinus Torvalds 			complain = jiffies + 300 * HZ;
13261da177e4SLinus Torvalds 		}
13271da177e4SLinus Torvalds 		/* Can't do anything about it except throw an error. */
13281da177e4SLinus Torvalds 		task->tk_status = -EIO;
13291da177e4SLinus Torvalds 	}
133013602896SFred Isaman 	return;
13311da177e4SLinus Torvalds }
13321da177e4SLinus Torvalds 
13331da177e4SLinus Torvalds 
13341da177e4SLinus Torvalds #if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
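/*
 * Try to take the per-inode NFS_INO_COMMIT bit lock, optionally waiting
 * (killably) for it.  Returns 1 once the lock is held, 0 if it is busy and
 * waiting was not requested, or a negative errno.
 */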
133571d0a611STrond Myklebust static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
133671d0a611STrond Myklebust {
1337b8413f98STrond Myklebust 	int ret;
1338b8413f98STrond Myklebust 
133971d0a611STrond Myklebust 	if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
134071d0a611STrond Myklebust 		return 1;
1341b8413f98STrond Myklebust 	if (!may_wait)
134271d0a611STrond Myklebust 		return 0;
1343b8413f98STrond Myklebust 	ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
1344b8413f98STrond Myklebust 				NFS_INO_COMMIT,
1345b8413f98STrond Myklebust 				nfs_wait_bit_killable,
1346b8413f98STrond Myklebust 				TASK_KILLABLE);
1347b8413f98STrond Myklebust 	return (ret < 0) ? ret : 1;
134871d0a611STrond Myklebust }
134971d0a611STrond Myklebust 
1350e0c2b380SFred Isaman void nfs_commit_clear_lock(struct nfs_inode *nfsi)
135171d0a611STrond Myklebust {
135271d0a611STrond Myklebust 	clear_bit(NFS_INO_COMMIT, &nfsi->flags);
135371d0a611STrond Myklebust 	smp_mb__after_clear_bit();
135471d0a611STrond Myklebust 	wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
135571d0a611STrond Myklebust }
1356e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
135771d0a611STrond Myklebust 
1358e0c2b380SFred Isaman void nfs_commitdata_release(void *data)
13591da177e4SLinus Torvalds {
1360383ba719STrond Myklebust 	struct nfs_write_data *wdata = data;
1361383ba719STrond Myklebust 
1362383ba719STrond Myklebust 	put_nfs_open_context(wdata->args.context);
13631da177e4SLinus Torvalds 	nfs_commit_free(wdata);
13641da177e4SLinus Torvalds }
1365e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_commitdata_release);
13661da177e4SLinus Torvalds 
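/*
 * Set up and launch the asynchronous COMMIT RPC; with FLUSH_SYNC the caller
 * also waits here for the task to complete.
 */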
1367e0c2b380SFred Isaman int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
13689ace33cdSFred Isaman 			const struct rpc_call_ops *call_ops,
1369788e7a89STrond Myklebust 			int how)
13701da177e4SLinus Torvalds {
137107737691STrond Myklebust 	struct rpc_task *task;
13729ace33cdSFred Isaman 	int priority = flush_task_priority(how);
1373bdc7f021STrond Myklebust 	struct rpc_message msg = {
1374bdc7f021STrond Myklebust 		.rpc_argp = &data->args,
1375bdc7f021STrond Myklebust 		.rpc_resp = &data->res,
13769ace33cdSFred Isaman 		.rpc_cred = data->cred,
1377bdc7f021STrond Myklebust 	};
137884115e1cSTrond Myklebust 	struct rpc_task_setup task_setup_data = {
137907737691STrond Myklebust 		.task = &data->task,
13809ace33cdSFred Isaman 		.rpc_client = clnt,
1381bdc7f021STrond Myklebust 		.rpc_message = &msg,
13829ace33cdSFred Isaman 		.callback_ops = call_ops,
138384115e1cSTrond Myklebust 		.callback_data = data,
1384101070caSTrond Myklebust 		.workqueue = nfsiod_workqueue,
13852c61be0aSTrond Myklebust 		.flags = RPC_TASK_ASYNC,
13863ff7576dSTrond Myklebust 		.priority = priority,
138784115e1cSTrond Myklebust 	};
1388788e7a89STrond Myklebust 	/* Set up the initial task struct.  */
13899ace33cdSFred Isaman 	NFS_PROTO(data->inode)->commit_setup(data, &msg);
13901da177e4SLinus Torvalds 
1391a3f565b1SChuck Lever 	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
1392bdc7f021STrond Myklebust 
139307737691STrond Myklebust 	task = rpc_run_task(&task_setup_data);
1394dbae4c73STrond Myklebust 	if (IS_ERR(task))
1395dbae4c73STrond Myklebust 		return PTR_ERR(task);
1396d2224e7aSJeff Layton 	if (how & FLUSH_SYNC)
1397d2224e7aSJeff Layton 		rpc_wait_for_completion_task(task);
139807737691STrond Myklebust 	rpc_put_task(task);
1399dbae4c73STrond Myklebust 	return 0;
14001da177e4SLinus Torvalds }
1401e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_initiate_commit);
14021da177e4SLinus Torvalds 
14031da177e4SLinus Torvalds /*
14049ace33cdSFred Isaman  * Set up the argument/result storage required for the RPC call.
14059ace33cdSFred Isaman  */
1406e0c2b380SFred Isaman void nfs_init_commit(struct nfs_write_data *data,
1407988b6dceSFred Isaman 			    struct list_head *head,
1408988b6dceSFred Isaman 			    struct pnfs_layout_segment *lseg)
14099ace33cdSFred Isaman {
14109ace33cdSFred Isaman 	struct nfs_page *first = nfs_list_entry(head->next);
14113d4ff43dSAl Viro 	struct inode *inode = first->wb_context->dentry->d_inode;
14129ace33cdSFred Isaman 
14139ace33cdSFred Isaman 	/* Set up the RPC argument and reply structs
14149ace33cdSFred Isaman 	 * NB: take care not to mess about with data->commit et al. */
14159ace33cdSFred Isaman 
14169ace33cdSFred Isaman 	list_splice_init(head, &data->pages);
14179ace33cdSFred Isaman 
14189ace33cdSFred Isaman 	data->inode	  = inode;
14199ace33cdSFred Isaman 	data->cred	  = first->wb_context->cred;
1420988b6dceSFred Isaman 	data->lseg	  = lseg; /* reference transferred */
14219ace33cdSFred Isaman 	data->mds_ops     = &nfs_commit_ops;
14229ace33cdSFred Isaman 
14239ace33cdSFred Isaman 	data->args.fh     = NFS_FH(data->inode);
14249ace33cdSFred Isaman 	/* Note: we always request a commit of the entire inode */
14259ace33cdSFred Isaman 	data->args.offset = 0;
14269ace33cdSFred Isaman 	data->args.count  = 0;
14279ace33cdSFred Isaman 	data->args.context = get_nfs_open_context(first->wb_context);
14289ace33cdSFred Isaman 	data->res.count   = 0;
14299ace33cdSFred Isaman 	data->res.fattr   = &data->fattr;
14309ace33cdSFred Isaman 	data->res.verf    = &data->verf;
14319ace33cdSFred Isaman 	nfs_fattr_init(&data->fattr);
14329ace33cdSFred Isaman }
1433e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_init_commit);
14349ace33cdSFred Isaman 
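/*
 * Put a list of requests back on the commit list (e.g. when a commit could
 * not be set up), adjusting the unstable-page statistics accordingly.
 */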
1435e0c2b380SFred Isaman void nfs_retry_commit(struct list_head *page_list,
1436a861a1e1SFred Isaman 		      struct pnfs_layout_segment *lseg)
143764bfeb49SFred Isaman {
143864bfeb49SFred Isaman 	struct nfs_page *req;
143964bfeb49SFred Isaman 
144064bfeb49SFred Isaman 	while (!list_empty(page_list)) {
144164bfeb49SFred Isaman 		req = nfs_list_entry(page_list->next);
144264bfeb49SFred Isaman 		nfs_list_remove_request(req);
1443a861a1e1SFred Isaman 		nfs_mark_request_commit(req, lseg);
144464bfeb49SFred Isaman 		dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
144564bfeb49SFred Isaman 		dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
144664bfeb49SFred Isaman 			     BDI_RECLAIMABLE);
14479994b62bSFred Isaman 		nfs_unlock_request(req);
144864bfeb49SFred Isaman 	}
144964bfeb49SFred Isaman }
1450e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_retry_commit);
145164bfeb49SFred Isaman 
14529ace33cdSFred Isaman /*
14531da177e4SLinus Torvalds  * Commit dirty pages
14541da177e4SLinus Torvalds  */
14551da177e4SLinus Torvalds static int
145640859d7eSChuck Lever nfs_commit_list(struct inode *inode, struct list_head *head, int how)
14571da177e4SLinus Torvalds {
14581da177e4SLinus Torvalds 	struct nfs_write_data	*data;
14591da177e4SLinus Torvalds 
1460c9d8f89dSTrond Myklebust 	data = nfs_commitdata_alloc();
14611da177e4SLinus Torvalds 
14621da177e4SLinus Torvalds 	if (!data)
14631da177e4SLinus Torvalds 		goto out_bad;
14641da177e4SLinus Torvalds 
14651da177e4SLinus Torvalds 	/* Set up the argument struct */
1466988b6dceSFred Isaman 	nfs_init_commit(data, head, NULL);
14679ace33cdSFred Isaman 	return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how);
14681da177e4SLinus Torvalds  out_bad:
1469a861a1e1SFred Isaman 	nfs_retry_commit(head, NULL);
147071d0a611STrond Myklebust 	nfs_commit_clear_lock(NFS_I(inode));
14711da177e4SLinus Torvalds 	return -ENOMEM;
14721da177e4SLinus Torvalds }
14731da177e4SLinus Torvalds 
14741da177e4SLinus Torvalds /*
14751da177e4SLinus Torvalds  * COMMIT call returned
14761da177e4SLinus Torvalds  */
1477788e7a89STrond Myklebust static void nfs_commit_done(struct rpc_task *task, void *calldata)
14781da177e4SLinus Torvalds {
1479963d8fe5STrond Myklebust 	struct nfs_write_data	*data = calldata;
14801da177e4SLinus Torvalds 
1481a3f565b1SChuck Lever 	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
14821da177e4SLinus Torvalds 			task->tk_pid, task->tk_status);
14831da177e4SLinus Torvalds 
1484788e7a89STrond Myklebust 	/* Call the NFS version-specific code */
1485c0d0e96bSTrond Myklebust 	NFS_PROTO(data->inode)->commit_done(task, data);
1486c9d8f89dSTrond Myklebust }
1487c9d8f89dSTrond Myklebust 
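/*
 * Process the requests covered by a completed COMMIT: on success, compare
 * each request's write verifier with the one the server returned - a match
 * means the data is stable and the request can go, a mismatch means the page
 * must be written again.
 */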
1488e0c2b380SFred Isaman void nfs_commit_release_pages(struct nfs_write_data *data)
1489c9d8f89dSTrond Myklebust {
1490c9d8f89dSTrond Myklebust 	struct nfs_page	*req;
1491c9d8f89dSTrond Myklebust 	int status = data->task.tk_status;
1492788e7a89STrond Myklebust 
14931da177e4SLinus Torvalds 	while (!list_empty(&data->pages)) {
14941da177e4SLinus Torvalds 		req = nfs_list_entry(data->pages.next);
14951da177e4SLinus Torvalds 		nfs_list_remove_request(req);
1496d6d6dc7cSFred Isaman 		nfs_clear_page_commit(req->wb_page);
14971da177e4SLinus Torvalds 
149848186c7dSChuck Lever 		dprintk("NFS:       commit (%s/%lld %d@%lld)",
14993d4ff43dSAl Viro 			req->wb_context->dentry->d_sb->s_id,
15003d4ff43dSAl Viro 			(long long)NFS_FILEID(req->wb_context->dentry->d_inode),
15011da177e4SLinus Torvalds 			req->wb_bytes,
15021da177e4SLinus Torvalds 			(long long)req_offset(req));
1503c9d8f89dSTrond Myklebust 		if (status < 0) {
1504c9d8f89dSTrond Myklebust 			nfs_context_set_write_error(req->wb_context, status);
15051da177e4SLinus Torvalds 			nfs_inode_remove_request(req);
1506c9d8f89dSTrond Myklebust 			dprintk(", error = %d\n", status);
15071da177e4SLinus Torvalds 			goto next;
15081da177e4SLinus Torvalds 		}
15091da177e4SLinus Torvalds 
15101da177e4SLinus Torvalds 		/* Okay, COMMIT succeeded, apparently. Check the verifier
15111da177e4SLinus Torvalds 		 * returned by the server against all stored verfs. */
15121da177e4SLinus Torvalds 		if (!memcmp(req->wb_verf.verifier, data->verf.verifier, sizeof(data->verf.verifier))) {
15131da177e4SLinus Torvalds 			/* We have a match */
15141da177e4SLinus Torvalds 			nfs_inode_remove_request(req);
15151da177e4SLinus Torvalds 			dprintk(" OK\n");
15161da177e4SLinus Torvalds 			goto next;
15171da177e4SLinus Torvalds 		}
15181da177e4SLinus Torvalds 		/* We have a mismatch. Write the page again */
15191da177e4SLinus Torvalds 		dprintk(" mismatch\n");
15206d884e8fSFred 		nfs_mark_request_dirty(req);
15211da177e4SLinus Torvalds 	next:
15229994b62bSFred Isaman 		nfs_unlock_request(req);
15231da177e4SLinus Torvalds 	}
15245917ce84SFred Isaman }
1525e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
15265917ce84SFred Isaman 
15275917ce84SFred Isaman static void nfs_commit_release(void *calldata)
15285917ce84SFred Isaman {
15295917ce84SFred Isaman 	struct nfs_write_data *data = calldata;
15305917ce84SFred Isaman 
15315917ce84SFred Isaman 	nfs_commit_release_pages(data);
153271d0a611STrond Myklebust 	nfs_commit_clear_lock(NFS_I(data->inode));
1533c9d8f89dSTrond Myklebust 	nfs_commitdata_release(calldata);
15341da177e4SLinus Torvalds }
1535788e7a89STrond Myklebust 
1536788e7a89STrond Myklebust static const struct rpc_call_ops nfs_commit_ops = {
153721d9a851SAndy Adamson 	.rpc_call_prepare = nfs_write_prepare,
1538788e7a89STrond Myklebust 	.rpc_call_done = nfs_commit_done,
1539788e7a89STrond Myklebust 	.rpc_release = nfs_commit_release,
1540788e7a89STrond Myklebust };
15411da177e4SLinus Torvalds 
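/*
 * Commit this inode's unstable writes.  Scan the commit list and send a
 * COMMIT (via pNFS where possible, otherwise straight to the server); with
 * FLUSH_SYNC, also wait for it to complete.  If the commit is not known to
 * be complete on exit, the inode is re-marked dirty (see below).
 */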
1542b608b283STrond Myklebust int nfs_commit_inode(struct inode *inode, int how)
15431da177e4SLinus Torvalds {
15441da177e4SLinus Torvalds 	LIST_HEAD(head);
154571d0a611STrond Myklebust 	int may_wait = how & FLUSH_SYNC;
1546b8413f98STrond Myklebust 	int res;
15471da177e4SLinus Torvalds 
1548b8413f98STrond Myklebust 	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1549b8413f98STrond Myklebust 	if (res <= 0)
1550c5efa5fcSTrond Myklebust 		goto out_mark_dirty;
1551d6d6dc7cSFred Isaman 	res = nfs_scan_commit(inode, &head);
15521da177e4SLinus Torvalds 	if (res) {
1553a861a1e1SFred Isaman 		int error;
1554a861a1e1SFred Isaman 
1555a861a1e1SFred Isaman 		error = pnfs_commit_list(inode, &head, how);
1556a861a1e1SFred Isaman 		if (error == PNFS_NOT_ATTEMPTED)
1557a861a1e1SFred Isaman 			error = nfs_commit_list(inode, &head, how);
15581da177e4SLinus Torvalds 		if (error < 0)
15591da177e4SLinus Torvalds 			return error;
1560b8413f98STrond Myklebust 		if (!may_wait)
1561b8413f98STrond Myklebust 			goto out_mark_dirty;
1562b8413f98STrond Myklebust 		error = wait_on_bit(&NFS_I(inode)->flags,
1563b8413f98STrond Myklebust 				NFS_INO_COMMIT,
156471d0a611STrond Myklebust 				nfs_wait_bit_killable,
156571d0a611STrond Myklebust 				TASK_KILLABLE);
1566b8413f98STrond Myklebust 		if (error < 0)
1567b8413f98STrond Myklebust 			return error;
156871d0a611STrond Myklebust 	} else
156971d0a611STrond Myklebust 		nfs_commit_clear_lock(NFS_I(inode));
1570c5efa5fcSTrond Myklebust 	return res;
1571c5efa5fcSTrond Myklebust 	/* Note: If we exit without ensuring that the commit is complete,
1572c5efa5fcSTrond Myklebust 	 * we must mark the inode as dirty. Otherwise, future calls to
1573c5efa5fcSTrond Myklebust 	 * sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
1574c5efa5fcSTrond Myklebust 	 * that the data is on the disk.
1575c5efa5fcSTrond Myklebust 	 */
1576c5efa5fcSTrond Myklebust out_mark_dirty:
1577c5efa5fcSTrond Myklebust 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
15781da177e4SLinus Torvalds 	return res;
15791da177e4SLinus Torvalds }
15808fc795f7STrond Myklebust 
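/*
 * Push unstable pages to stable storage during writeback.  A non-blocking
 * flush skips the commit (leaving the inode dirty) while most pages are
 * still in flight, and does not wait for any COMMIT it does send.
 */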
15818fc795f7STrond Myklebust static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
15828fc795f7STrond Myklebust {
1583420e3646STrond Myklebust 	struct nfs_inode *nfsi = NFS_I(inode);
1584420e3646STrond Myklebust 	int flags = FLUSH_SYNC;
1585420e3646STrond Myklebust 	int ret = 0;
15868fc795f7STrond Myklebust 
15873236c3e1SJeff Layton 	/* no commits means nothing needs to be done */
15883236c3e1SJeff Layton 	if (!nfsi->ncommit)
15893236c3e1SJeff Layton 		return ret;
15903236c3e1SJeff Layton 
1591a00dd6c0SJeff Layton 	if (wbc->sync_mode == WB_SYNC_NONE) {
1592a00dd6c0SJeff Layton 		/* Don't commit yet if this is a non-blocking flush and there
1593a00dd6c0SJeff Layton 		 * are a lot of outstanding writes for this mapping.
1594420e3646STrond Myklebust 		 */
1595a00dd6c0SJeff Layton 		if (nfsi->ncommit <= (nfsi->npages >> 1))
1596420e3646STrond Myklebust 			goto out_mark_dirty;
1597420e3646STrond Myklebust 
1598a00dd6c0SJeff Layton 		/* don't wait for the COMMIT response */
1599420e3646STrond Myklebust 		flags = 0;
1600a00dd6c0SJeff Layton 	}
1601a00dd6c0SJeff Layton 
1602420e3646STrond Myklebust 	ret = nfs_commit_inode(inode, flags);
1603420e3646STrond Myklebust 	if (ret >= 0) {
1604420e3646STrond Myklebust 		if (wbc->sync_mode == WB_SYNC_NONE) {
1605420e3646STrond Myklebust 			if (ret < wbc->nr_to_write)
1606420e3646STrond Myklebust 				wbc->nr_to_write -= ret;
1607420e3646STrond Myklebust 			else
1608420e3646STrond Myklebust 				wbc->nr_to_write = 0;
1609420e3646STrond Myklebust 		}
16108fc795f7STrond Myklebust 		return 0;
1611420e3646STrond Myklebust 	}
1612420e3646STrond Myklebust out_mark_dirty:
16138fc795f7STrond Myklebust 	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
16148fc795f7STrond Myklebust 	return ret;
16158fc795f7STrond Myklebust }
1616c63c7b05STrond Myklebust #else
16178fc795f7STrond Myklebust static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
16188fc795f7STrond Myklebust {
16198fc795f7STrond Myklebust 	return 0;
16208fc795f7STrond Myklebust }
16211da177e4SLinus Torvalds #endif
16221da177e4SLinus Torvalds 
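/*
 * Presumably the NFS ->write_inode hook: commit unstable pages and, when a
 * pNFS layout requires it, send a LAYOUTCOMMIT (synchronously unless this is
 * a WB_SYNC_NONE flush).
 */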
16238fc795f7STrond Myklebust int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
16248fc795f7STrond Myklebust {
1625863a3c6cSAndy Adamson 	int ret;
1626863a3c6cSAndy Adamson 
1627863a3c6cSAndy Adamson 	ret = nfs_commit_unstable_pages(inode, wbc);
1628863a3c6cSAndy Adamson 	if (ret >= 0 && test_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(inode)->flags)) {
1629ef311537SAndy Adamson 		int status;
1630ef311537SAndy Adamson 		bool sync = true;
1631863a3c6cSAndy Adamson 
1632846d5a09SWu Fengguang 		if (wbc->sync_mode == WB_SYNC_NONE)
1633ef311537SAndy Adamson 			sync = false;
1634863a3c6cSAndy Adamson 
1635863a3c6cSAndy Adamson 		status = pnfs_layoutcommit_inode(inode, sync);
1636863a3c6cSAndy Adamson 		if (status < 0)
1637863a3c6cSAndy Adamson 			return status;
1638863a3c6cSAndy Adamson 	}
1639863a3c6cSAndy Adamson 	return ret;
16408fc795f7STrond Myklebust }
16418fc795f7STrond Myklebust 
1642acdc53b2STrond Myklebust /*
1643acdc53b2STrond Myklebust  * flush the inode to disk.
1644acdc53b2STrond Myklebust  */
1645acdc53b2STrond Myklebust int nfs_wb_all(struct inode *inode)
164634901f70STrond Myklebust {
164734901f70STrond Myklebust 	struct writeback_control wbc = {
164872cb77f4STrond Myklebust 		.sync_mode = WB_SYNC_ALL,
164934901f70STrond Myklebust 		.nr_to_write = LONG_MAX,
1650d7fb1207STrond Myklebust 		.range_start = 0,
1651d7fb1207STrond Myklebust 		.range_end = LLONG_MAX,
165234901f70STrond Myklebust 	};
165334901f70STrond Myklebust 
1654acdc53b2STrond Myklebust 	return sync_inode(inode, &wbc);
16551c75950bSTrond Myklebust }
16561c75950bSTrond Myklebust 
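/*
 * Cancel outstanding write requests for a page that is being discarded
 * (presumably on truncate/invalidate): wait for writeback, then either
 * remove the request outright or wait for it to finish.
 */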
16571b3b4a1aSTrond Myklebust int nfs_wb_page_cancel(struct inode *inode, struct page *page)
16581b3b4a1aSTrond Myklebust {
16591b3b4a1aSTrond Myklebust 	struct nfs_page *req;
16601b3b4a1aSTrond Myklebust 	int ret = 0;
16611b3b4a1aSTrond Myklebust 
16621b3b4a1aSTrond Myklebust 	BUG_ON(!PageLocked(page));
16631b3b4a1aSTrond Myklebust 	for (;;) {
1664ba8b06e6STrond Myklebust 		wait_on_page_writeback(page);
16651b3b4a1aSTrond Myklebust 		req = nfs_page_find_request(page);
16661b3b4a1aSTrond Myklebust 		if (req == NULL)
16671b3b4a1aSTrond Myklebust 			break;
16681b3b4a1aSTrond Myklebust 		if (nfs_lock_request_dontget(req)) {
16698dd37758STrond Myklebust 			nfs_clear_request_commit(req);
16701b3b4a1aSTrond Myklebust 			nfs_inode_remove_request(req);
16711b3b4a1aSTrond Myklebust 			/*
16721b3b4a1aSTrond Myklebust 			 * In case nfs_inode_remove_request has marked the
16731b3b4a1aSTrond Myklebust 			 * page as being dirty
16741b3b4a1aSTrond Myklebust 			 */
16751b3b4a1aSTrond Myklebust 			cancel_dirty_page(page, PAGE_CACHE_SIZE);
16761b3b4a1aSTrond Myklebust 			nfs_unlock_request(req);
16771b3b4a1aSTrond Myklebust 			break;
16781b3b4a1aSTrond Myklebust 		}
16791b3b4a1aSTrond Myklebust 		ret = nfs_wait_on_request(req);
1680c9edda71STrond Myklebust 		nfs_release_request(req);
16811b3b4a1aSTrond Myklebust 		if (ret < 0)
1682c988950eSTrond Myklebust 			break;
16831b3b4a1aSTrond Myklebust 	}
16841b3b4a1aSTrond Myklebust 	return ret;
16851b3b4a1aSTrond Myklebust }
16861b3b4a1aSTrond Myklebust 
16871c75950bSTrond Myklebust /*
16881c75950bSTrond Myklebust  * Write back all requests on one page - we do this before reading it.
16891c75950bSTrond Myklebust  */
16901c75950bSTrond Myklebust int nfs_wb_page(struct inode *inode, struct page *page)
16911c75950bSTrond Myklebust {
16927f2f12d9STrond Myklebust 	loff_t range_start = page_offset(page);
16937f2f12d9STrond Myklebust 	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
16947f2f12d9STrond Myklebust 	struct writeback_control wbc = {
16957f2f12d9STrond Myklebust 		.sync_mode = WB_SYNC_ALL,
16967f2f12d9STrond Myklebust 		.nr_to_write = 0,
16977f2f12d9STrond Myklebust 		.range_start = range_start,
16987f2f12d9STrond Myklebust 		.range_end = range_end,
16997f2f12d9STrond Myklebust 	};
17007f2f12d9STrond Myklebust 	int ret;
17017f2f12d9STrond Myklebust 
17020522f6adSTrond Myklebust 	for (;;) {
1703ba8b06e6STrond Myklebust 		wait_on_page_writeback(page);
17047f2f12d9STrond Myklebust 		if (clear_page_dirty_for_io(page)) {
17057f2f12d9STrond Myklebust 			ret = nfs_writepage_locked(page, &wbc);
17067f2f12d9STrond Myklebust 			if (ret < 0)
17077f2f12d9STrond Myklebust 				goto out_error;
17080522f6adSTrond Myklebust 			continue;
17097f2f12d9STrond Myklebust 		}
17100522f6adSTrond Myklebust 		if (!PagePrivate(page))
17110522f6adSTrond Myklebust 			break;
17120522f6adSTrond Myklebust 		ret = nfs_commit_inode(inode, FLUSH_SYNC);
17137f2f12d9STrond Myklebust 		if (ret < 0)
17147f2f12d9STrond Myklebust 			goto out_error;
17157f2f12d9STrond Myklebust 	}
17167f2f12d9STrond Myklebust 	return 0;
17177f2f12d9STrond Myklebust out_error:
17187f2f12d9STrond Myklebust 	return ret;
17191c75950bSTrond Myklebust }
17201c75950bSTrond Myklebust 
1721074cc1deSTrond Myklebust #ifdef CONFIG_MIGRATION
1722074cc1deSTrond Myklebust int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1723a6bc32b8SMel Gorman 		struct page *page, enum migrate_mode mode)
1724074cc1deSTrond Myklebust {
17252da95652SJeff Layton 	/*
17262da95652SJeff Layton 	 * If PagePrivate is set, then the page is currently associated with
17272da95652SJeff Layton 	 * an in-progress read or write request. Don't try to migrate it.
17282da95652SJeff Layton 	 *
17292da95652SJeff Layton 	 * FIXME: we could do this in principle, but we'll need a way to ensure
17302da95652SJeff Layton 	 *        that we can safely release the inode reference while holding
17312da95652SJeff Layton 	 *        the page lock.
17322da95652SJeff Layton 	 */
17332da95652SJeff Layton 	if (PagePrivate(page))
17342da95652SJeff Layton 		return -EBUSY;
1735074cc1deSTrond Myklebust 
1736074cc1deSTrond Myklebust 	nfs_fscache_release_page(page, GFP_KERNEL);
1737074cc1deSTrond Myklebust 
1738a6bc32b8SMel Gorman 	return migrate_page(mapping, newpage, page, mode);
1739074cc1deSTrond Myklebust }
1740074cc1deSTrond Myklebust #endif
1741074cc1deSTrond Myklebust 
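/*
 * Create the slab cache and mempools used for write and commit data, and
 * size the NFS congestion limit from the amount of available memory.
 */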
1742f7b422b1SDavid Howells int __init nfs_init_writepagecache(void)
17431da177e4SLinus Torvalds {
17441da177e4SLinus Torvalds 	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
17451da177e4SLinus Torvalds 					     sizeof(struct nfs_write_data),
17461da177e4SLinus Torvalds 					     0, SLAB_HWCACHE_ALIGN,
174720c2df83SPaul Mundt 					     NULL);
17481da177e4SLinus Torvalds 	if (nfs_wdata_cachep == NULL)
17491da177e4SLinus Torvalds 		return -ENOMEM;
17501da177e4SLinus Torvalds 
175193d2341cSMatthew Dobson 	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
17521da177e4SLinus Torvalds 						     nfs_wdata_cachep);
17531da177e4SLinus Torvalds 	if (nfs_wdata_mempool == NULL)
17541da177e4SLinus Torvalds 		return -ENOMEM;
17551da177e4SLinus Torvalds 
175693d2341cSMatthew Dobson 	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
17571da177e4SLinus Torvalds 						      nfs_wdata_cachep);
17581da177e4SLinus Torvalds 	if (nfs_commit_mempool == NULL)
17591da177e4SLinus Torvalds 		return -ENOMEM;
17601da177e4SLinus Torvalds 
176189a09141SPeter Zijlstra 	/*
176289a09141SPeter Zijlstra 	 * NFS congestion size, scale with available memory.
176389a09141SPeter Zijlstra 	 *
176489a09141SPeter Zijlstra 	 *  64MB:    8192k
176589a09141SPeter Zijlstra 	 * 128MB:   11585k
176689a09141SPeter Zijlstra 	 * 256MB:   16384k
176789a09141SPeter Zijlstra 	 * 512MB:   23170k
176889a09141SPeter Zijlstra 	 *   1GB:   32768k
176989a09141SPeter Zijlstra 	 *   2GB:   46340k
177089a09141SPeter Zijlstra 	 *   4GB:   65536k
177189a09141SPeter Zijlstra 	 *   8GB:   92681k
177289a09141SPeter Zijlstra 	 *  16GB:  131072k
177389a09141SPeter Zijlstra 	 *
177489a09141SPeter Zijlstra 	 * This allows larger machines to have larger/more transfers.
177589a09141SPeter Zijlstra 	 * Limit the default to 256M
177689a09141SPeter Zijlstra 	 */
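	/*
	 * For example, assuming 4KiB pages, 1GB of RAM is 262144 pages;
	 * int_sqrt(262144) = 512, and (16 * 512) << (PAGE_SHIFT - 10)
	 * = 8192 << 2 = 32768k, matching the table above.
	 */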
177789a09141SPeter Zijlstra 	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
177889a09141SPeter Zijlstra 	if (nfs_congestion_kb > 256*1024)
177989a09141SPeter Zijlstra 		nfs_congestion_kb = 256*1024;
178089a09141SPeter Zijlstra 
17811da177e4SLinus Torvalds 	return 0;
17821da177e4SLinus Torvalds }
17831da177e4SLinus Torvalds 
1784266bee88SDavid Brownell void nfs_destroy_writepagecache(void)
17851da177e4SLinus Torvalds {
17861da177e4SLinus Torvalds 	mempool_destroy(nfs_commit_mempool);
17871da177e4SLinus Torvalds 	mempool_destroy(nfs_wdata_mempool);
17881a1d92c1SAlexey Dobriyan 	kmem_cache_destroy(nfs_wdata_cachep);
17891da177e4SLinus Torvalds }
17901da177e4SLinus Torvalds 
1791