11da177e4SLinus Torvalds /* 21da177e4SLinus Torvalds * linux/fs/nfs/write.c 31da177e4SLinus Torvalds * 47c85d900STrond Myklebust * Write file data over NFS. 51da177e4SLinus Torvalds * 61da177e4SLinus Torvalds * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> 71da177e4SLinus Torvalds */ 81da177e4SLinus Torvalds 91da177e4SLinus Torvalds #include <linux/types.h> 101da177e4SLinus Torvalds #include <linux/slab.h> 111da177e4SLinus Torvalds #include <linux/mm.h> 121da177e4SLinus Torvalds #include <linux/pagemap.h> 131da177e4SLinus Torvalds #include <linux/file.h> 141da177e4SLinus Torvalds #include <linux/writeback.h> 1589a09141SPeter Zijlstra #include <linux/swap.h> 16074cc1deSTrond Myklebust #include <linux/migrate.h> 171da177e4SLinus Torvalds 181da177e4SLinus Torvalds #include <linux/sunrpc/clnt.h> 191da177e4SLinus Torvalds #include <linux/nfs_fs.h> 201da177e4SLinus Torvalds #include <linux/nfs_mount.h> 211da177e4SLinus Torvalds #include <linux/nfs_page.h> 223fcfab16SAndrew Morton #include <linux/backing-dev.h> 23afeacc8cSPaul Gortmaker #include <linux/export.h> 243fcfab16SAndrew Morton 251da177e4SLinus Torvalds #include <asm/uaccess.h> 261da177e4SLinus Torvalds 271da177e4SLinus Torvalds #include "delegation.h" 2849a70f27STrond Myklebust #include "internal.h" 2991d5b470SChuck Lever #include "iostat.h" 30def6ed7eSAndy Adamson #include "nfs4_fs.h" 31074cc1deSTrond Myklebust #include "fscache.h" 3294ad1c80SFred Isaman #include "pnfs.h" 331da177e4SLinus Torvalds 34f4ce1299STrond Myklebust #include "nfstrace.h" 35f4ce1299STrond Myklebust 361da177e4SLinus Torvalds #define NFSDBG_FACILITY NFSDBG_PAGECACHE 371da177e4SLinus Torvalds 381da177e4SLinus Torvalds #define MIN_POOL_WRITE (32) 391da177e4SLinus Torvalds #define MIN_POOL_COMMIT (4) 401da177e4SLinus Torvalds 411da177e4SLinus Torvalds /* 421da177e4SLinus Torvalds * Local function declarations 431da177e4SLinus Torvalds */ 44f8512ad0SFred Isaman static void nfs_redirty_request(struct nfs_page *req); 45788e7a89STrond 
Myklebust static const struct rpc_call_ops nfs_commit_ops; 46061ae2edSFred Isaman static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 47f453a54aSFred Isaman static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 484a0de55cSAnna Schumaker static const struct nfs_rw_ops nfs_rw_write_ops; 491da177e4SLinus Torvalds 50e18b890bSChristoph Lameter static struct kmem_cache *nfs_wdata_cachep; 513feb2d49STrond Myklebust static mempool_t *nfs_wdata_mempool; 520b7c0153SFred Isaman static struct kmem_cache *nfs_cdata_cachep; 531da177e4SLinus Torvalds static mempool_t *nfs_commit_mempool; 541da177e4SLinus Torvalds 550b7c0153SFred Isaman struct nfs_commit_data *nfs_commitdata_alloc(void) 561da177e4SLinus Torvalds { 57192e501bSMel Gorman struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOIO); 5840859d7eSChuck Lever 591da177e4SLinus Torvalds if (p) { 601da177e4SLinus Torvalds memset(p, 0, sizeof(*p)); 611da177e4SLinus Torvalds INIT_LIST_HEAD(&p->pages); 621da177e4SLinus Torvalds } 631da177e4SLinus Torvalds return p; 641da177e4SLinus Torvalds } 65e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_commitdata_alloc); 661da177e4SLinus Torvalds 670b7c0153SFred Isaman void nfs_commit_free(struct nfs_commit_data *p) 681da177e4SLinus Torvalds { 691da177e4SLinus Torvalds mempool_free(p, nfs_commit_mempool); 701da177e4SLinus Torvalds } 71e0c2b380SFred Isaman EXPORT_SYMBOL_GPL(nfs_commit_free); 721da177e4SLinus Torvalds 734a0de55cSAnna Schumaker static struct nfs_rw_header *nfs_writehdr_alloc(void) 743feb2d49STrond Myklebust { 75c0752cdfSAnna Schumaker struct nfs_rw_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO); 763feb2d49STrond Myklebust 774a0de55cSAnna Schumaker if (p) 783feb2d49STrond Myklebust memset(p, 0, sizeof(*p)); 793feb2d49STrond Myklebust return p; 803feb2d49STrond Myklebust } 813feb2d49STrond Myklebust 824a0de55cSAnna Schumaker static void nfs_writehdr_free(struct nfs_rw_header *whdr) 836c75dc0dSFred Isaman { 
84cd841605SFred Isaman mempool_free(whdr, nfs_wdata_mempool); 853feb2d49STrond Myklebust } 861da177e4SLinus Torvalds 877b159fc1STrond Myklebust static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) 887b159fc1STrond Myklebust { 897b159fc1STrond Myklebust ctx->error = error; 907b159fc1STrond Myklebust smp_wmb(); 917b159fc1STrond Myklebust set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 927b159fc1STrond Myklebust } 937b159fc1STrond Myklebust 9429418aa4SMel Gorman static struct nfs_page * 9529418aa4SMel Gorman nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) 96277459d2STrond Myklebust { 97277459d2STrond Myklebust struct nfs_page *req = NULL; 98277459d2STrond Myklebust 9929418aa4SMel Gorman if (PagePrivate(page)) 100277459d2STrond Myklebust req = (struct nfs_page *)page_private(page); 10129418aa4SMel Gorman else if (unlikely(PageSwapCache(page))) { 10229418aa4SMel Gorman struct nfs_page *freq, *t; 10329418aa4SMel Gorman 10429418aa4SMel Gorman /* Linearly search the commit list for the correct req */ 10529418aa4SMel Gorman list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { 10629418aa4SMel Gorman if (freq->wb_page == page) { 10729418aa4SMel Gorman req = freq; 10829418aa4SMel Gorman break; 109277459d2STrond Myklebust } 11029418aa4SMel Gorman } 11129418aa4SMel Gorman } 11229418aa4SMel Gorman 11329418aa4SMel Gorman if (req) 11429418aa4SMel Gorman kref_get(&req->wb_kref); 11529418aa4SMel Gorman 116277459d2STrond Myklebust return req; 117277459d2STrond Myklebust } 118277459d2STrond Myklebust 119277459d2STrond Myklebust static struct nfs_page *nfs_page_find_request(struct page *page) 120277459d2STrond Myklebust { 121d56b4ddfSMel Gorman struct inode *inode = page_file_mapping(page)->host; 122277459d2STrond Myklebust struct nfs_page *req = NULL; 123277459d2STrond Myklebust 124587142f8STrond Myklebust spin_lock(&inode->i_lock); 12529418aa4SMel Gorman req = nfs_page_find_request_locked(NFS_I(inode), page); 
126587142f8STrond Myklebust spin_unlock(&inode->i_lock); 127277459d2STrond Myklebust return req; 128277459d2STrond Myklebust } 129277459d2STrond Myklebust 1301da177e4SLinus Torvalds /* Adjust the file length if we're writing beyond the end */ 1311da177e4SLinus Torvalds static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count) 1321da177e4SLinus Torvalds { 133d56b4ddfSMel Gorman struct inode *inode = page_file_mapping(page)->host; 134a3d01454STrond Myklebust loff_t end, i_size; 135a3d01454STrond Myklebust pgoff_t end_index; 1361da177e4SLinus Torvalds 137a3d01454STrond Myklebust spin_lock(&inode->i_lock); 138a3d01454STrond Myklebust i_size = i_size_read(inode); 139a3d01454STrond Myklebust end_index = (i_size - 1) >> PAGE_CACHE_SHIFT; 140d56b4ddfSMel Gorman if (i_size > 0 && page_file_index(page) < end_index) 141a3d01454STrond Myklebust goto out; 142d56b4ddfSMel Gorman end = page_file_offset(page) + ((loff_t)offset+count); 1431da177e4SLinus Torvalds if (i_size >= end) 144a3d01454STrond Myklebust goto out; 1451da177e4SLinus Torvalds i_size_write(inode, end); 146a3d01454STrond Myklebust nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); 147a3d01454STrond Myklebust out: 148a3d01454STrond Myklebust spin_unlock(&inode->i_lock); 1491da177e4SLinus Torvalds } 1501da177e4SLinus Torvalds 151a301b777STrond Myklebust /* A writeback failed: mark the page as bad, and invalidate the page cache */ 152a301b777STrond Myklebust static void nfs_set_pageerror(struct page *page) 153a301b777STrond Myklebust { 154d56b4ddfSMel Gorman nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page)); 155a301b777STrond Myklebust } 156a301b777STrond Myklebust 157d72ddcbaSWeston Andros Adamson /* 158d72ddcbaSWeston Andros Adamson * nfs_page_group_search_locked 159d72ddcbaSWeston Andros Adamson * @head - head request of page group 160d72ddcbaSWeston Andros Adamson * @page_offset - offset into page 161d72ddcbaSWeston Andros Adamson * 162d72ddcbaSWeston Andros Adamson * 
Search page group with head @head to find a request that contains the 163d72ddcbaSWeston Andros Adamson * page offset @page_offset. 164d72ddcbaSWeston Andros Adamson * 165d72ddcbaSWeston Andros Adamson * Returns a pointer to the first matching nfs request, or NULL if no 166d72ddcbaSWeston Andros Adamson * match is found. 167d72ddcbaSWeston Andros Adamson * 168d72ddcbaSWeston Andros Adamson * Must be called with the page group lock held 169d72ddcbaSWeston Andros Adamson */ 170d72ddcbaSWeston Andros Adamson static struct nfs_page * 171d72ddcbaSWeston Andros Adamson nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset) 172d72ddcbaSWeston Andros Adamson { 173d72ddcbaSWeston Andros Adamson struct nfs_page *req; 174d72ddcbaSWeston Andros Adamson 175d72ddcbaSWeston Andros Adamson WARN_ON_ONCE(head != head->wb_head); 176d72ddcbaSWeston Andros Adamson WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &head->wb_head->wb_flags)); 177d72ddcbaSWeston Andros Adamson 178d72ddcbaSWeston Andros Adamson req = head; 179d72ddcbaSWeston Andros Adamson do { 180d72ddcbaSWeston Andros Adamson if (page_offset >= req->wb_pgbase && 181d72ddcbaSWeston Andros Adamson page_offset < (req->wb_pgbase + req->wb_bytes)) 182d72ddcbaSWeston Andros Adamson return req; 183d72ddcbaSWeston Andros Adamson 184d72ddcbaSWeston Andros Adamson req = req->wb_this_page; 185d72ddcbaSWeston Andros Adamson } while (req != head); 186d72ddcbaSWeston Andros Adamson 187d72ddcbaSWeston Andros Adamson return NULL; 188d72ddcbaSWeston Andros Adamson } 189d72ddcbaSWeston Andros Adamson 190d72ddcbaSWeston Andros Adamson /* 191d72ddcbaSWeston Andros Adamson * nfs_page_group_covers_page 192d72ddcbaSWeston Andros Adamson * @head - head request of page group 193d72ddcbaSWeston Andros Adamson * 194d72ddcbaSWeston Andros Adamson * Return true if the page group with head @head covers the whole page, 195d72ddcbaSWeston Andros Adamson * returns false otherwise 196d72ddcbaSWeston Andros Adamson */ 197d72ddcbaSWeston Andros 
Adamson static bool nfs_page_group_covers_page(struct nfs_page *req) 198d72ddcbaSWeston Andros Adamson { 199d72ddcbaSWeston Andros Adamson struct nfs_page *tmp; 200d72ddcbaSWeston Andros Adamson unsigned int pos = 0; 201d72ddcbaSWeston Andros Adamson unsigned int len = nfs_page_length(req->wb_page); 202d72ddcbaSWeston Andros Adamson 203d72ddcbaSWeston Andros Adamson nfs_page_group_lock(req); 204d72ddcbaSWeston Andros Adamson 205d72ddcbaSWeston Andros Adamson do { 206d72ddcbaSWeston Andros Adamson tmp = nfs_page_group_search_locked(req->wb_head, pos); 207d72ddcbaSWeston Andros Adamson if (tmp) { 208d72ddcbaSWeston Andros Adamson /* no way this should happen */ 209d72ddcbaSWeston Andros Adamson WARN_ON_ONCE(tmp->wb_pgbase != pos); 210d72ddcbaSWeston Andros Adamson pos += tmp->wb_bytes - (pos - tmp->wb_pgbase); 211d72ddcbaSWeston Andros Adamson } 212d72ddcbaSWeston Andros Adamson } while (tmp && pos < len); 213d72ddcbaSWeston Andros Adamson 214d72ddcbaSWeston Andros Adamson nfs_page_group_unlock(req); 215d72ddcbaSWeston Andros Adamson WARN_ON_ONCE(pos > len); 216d72ddcbaSWeston Andros Adamson return pos == len; 217d72ddcbaSWeston Andros Adamson } 218d72ddcbaSWeston Andros Adamson 2191da177e4SLinus Torvalds /* We can set the PG_uptodate flag if we see that a write request 2201da177e4SLinus Torvalds * covers the full page. 
2211da177e4SLinus Torvalds */ 222d72ddcbaSWeston Andros Adamson static void nfs_mark_uptodate(struct nfs_page *req) 2231da177e4SLinus Torvalds { 224d72ddcbaSWeston Andros Adamson if (PageUptodate(req->wb_page)) 2251da177e4SLinus Torvalds return; 226d72ddcbaSWeston Andros Adamson if (!nfs_page_group_covers_page(req)) 2271da177e4SLinus Torvalds return; 228d72ddcbaSWeston Andros Adamson SetPageUptodate(req->wb_page); 2291da177e4SLinus Torvalds } 2301da177e4SLinus Torvalds 2311da177e4SLinus Torvalds static int wb_priority(struct writeback_control *wbc) 2321da177e4SLinus Torvalds { 2331da177e4SLinus Torvalds if (wbc->for_reclaim) 234c63c7b05STrond Myklebust return FLUSH_HIGHPRI | FLUSH_STABLE; 235b17621feSWu Fengguang if (wbc->for_kupdate || wbc->for_background) 236b31268acSTrond Myklebust return FLUSH_LOWPRI | FLUSH_COND_STABLE; 237b31268acSTrond Myklebust return FLUSH_COND_STABLE; 2381da177e4SLinus Torvalds } 2391da177e4SLinus Torvalds 2401da177e4SLinus Torvalds /* 24189a09141SPeter Zijlstra * NFS congestion control 24289a09141SPeter Zijlstra */ 24389a09141SPeter Zijlstra 24489a09141SPeter Zijlstra int nfs_congestion_kb; 24589a09141SPeter Zijlstra 24689a09141SPeter Zijlstra #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10)) 24789a09141SPeter Zijlstra #define NFS_CONGESTION_OFF_THRESH \ 24889a09141SPeter Zijlstra (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2)) 24989a09141SPeter Zijlstra 250deed85e7STrond Myklebust static void nfs_set_page_writeback(struct page *page) 25189a09141SPeter Zijlstra { 252deed85e7STrond Myklebust struct nfs_server *nfss = NFS_SERVER(page_file_mapping(page)->host); 2535a6d41b3STrond Myklebust int ret = test_set_page_writeback(page); 2545a6d41b3STrond Myklebust 255deed85e7STrond Myklebust WARN_ON_ONCE(ret != 0); 25689a09141SPeter Zijlstra 257277866a0SPeter Zijlstra if (atomic_long_inc_return(&nfss->writeback) > 2588aa7e847SJens Axboe NFS_CONGESTION_ON_THRESH) { 2598aa7e847SJens Axboe 
set_bdi_congested(&nfss->backing_dev_info, 2608aa7e847SJens Axboe BLK_RW_ASYNC); 2618aa7e847SJens Axboe } 26289a09141SPeter Zijlstra } 26389a09141SPeter Zijlstra 26420633f04SWeston Andros Adamson static void nfs_end_page_writeback(struct nfs_page *req) 26589a09141SPeter Zijlstra { 26620633f04SWeston Andros Adamson struct inode *inode = page_file_mapping(req->wb_page)->host; 26789a09141SPeter Zijlstra struct nfs_server *nfss = NFS_SERVER(inode); 26889a09141SPeter Zijlstra 26920633f04SWeston Andros Adamson if (!nfs_page_group_sync_on_bit(req, PG_WB_END)) 27020633f04SWeston Andros Adamson return; 27120633f04SWeston Andros Adamson 27220633f04SWeston Andros Adamson end_page_writeback(req->wb_page); 273c4dc4beeSPeter Zijlstra if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 2748aa7e847SJens Axboe clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 27589a09141SPeter Zijlstra } 27689a09141SPeter Zijlstra 277cfb506e1STrond Myklebust static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) 278e261f51fSTrond Myklebust { 279d56b4ddfSMel Gorman struct inode *inode = page_file_mapping(page)->host; 280e261f51fSTrond Myklebust struct nfs_page *req; 281e261f51fSTrond Myklebust int ret; 282e261f51fSTrond Myklebust 283587142f8STrond Myklebust spin_lock(&inode->i_lock); 284e261f51fSTrond Myklebust for (;;) { 28529418aa4SMel Gorman req = nfs_page_find_request_locked(NFS_I(inode), page); 286074cc1deSTrond Myklebust if (req == NULL) 287074cc1deSTrond Myklebust break; 2887ad84aa9STrond Myklebust if (nfs_lock_request(req)) 289e261f51fSTrond Myklebust break; 290e261f51fSTrond Myklebust /* Note: If we hold the page lock, as is the case in nfs_writepage, 2917ad84aa9STrond Myklebust * then the call to nfs_lock_request() will always 292e261f51fSTrond Myklebust * succeed provided that someone hasn't already marked the 293e261f51fSTrond Myklebust * request as dirty (in which case we don't care). 
294e261f51fSTrond Myklebust */ 295587142f8STrond Myklebust spin_unlock(&inode->i_lock); 296cfb506e1STrond Myklebust if (!nonblock) 297e261f51fSTrond Myklebust ret = nfs_wait_on_request(req); 298cfb506e1STrond Myklebust else 299cfb506e1STrond Myklebust ret = -EAGAIN; 300e261f51fSTrond Myklebust nfs_release_request(req); 301e261f51fSTrond Myklebust if (ret != 0) 302074cc1deSTrond Myklebust return ERR_PTR(ret); 303587142f8STrond Myklebust spin_lock(&inode->i_lock); 304e261f51fSTrond Myklebust } 305587142f8STrond Myklebust spin_unlock(&inode->i_lock); 306074cc1deSTrond Myklebust return req; 307612c9384STrond Myklebust } 308074cc1deSTrond Myklebust 309074cc1deSTrond Myklebust /* 310074cc1deSTrond Myklebust * Find an associated nfs write request, and prepare to flush it out 311074cc1deSTrond Myklebust * May return an error if the user signalled nfs_wait_on_request(). 312074cc1deSTrond Myklebust */ 313074cc1deSTrond Myklebust static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio, 314cfb506e1STrond Myklebust struct page *page, bool nonblock) 315074cc1deSTrond Myklebust { 316074cc1deSTrond Myklebust struct nfs_page *req; 317074cc1deSTrond Myklebust int ret = 0; 318074cc1deSTrond Myklebust 319cfb506e1STrond Myklebust req = nfs_find_and_lock_request(page, nonblock); 320074cc1deSTrond Myklebust if (!req) 321074cc1deSTrond Myklebust goto out; 322074cc1deSTrond Myklebust ret = PTR_ERR(req); 323074cc1deSTrond Myklebust if (IS_ERR(req)) 324074cc1deSTrond Myklebust goto out; 325074cc1deSTrond Myklebust 326deed85e7STrond Myklebust nfs_set_page_writeback(page); 327deed85e7STrond Myklebust WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 328074cc1deSTrond Myklebust 329deed85e7STrond Myklebust ret = 0; 330f8512ad0SFred Isaman if (!nfs_pageio_add_request(pgio, req)) { 331f8512ad0SFred Isaman nfs_redirty_request(req); 332074cc1deSTrond Myklebust ret = pgio->pg_error; 333f8512ad0SFred Isaman } 334074cc1deSTrond Myklebust out: 335074cc1deSTrond Myklebust return ret; 
336e261f51fSTrond Myklebust } 337e261f51fSTrond Myklebust 338f758c885STrond Myklebust static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio) 339f758c885STrond Myklebust { 340d56b4ddfSMel Gorman struct inode *inode = page_file_mapping(page)->host; 341cfb506e1STrond Myklebust int ret; 342f758c885STrond Myklebust 343f758c885STrond Myklebust nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); 344f758c885STrond Myklebust nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1); 345f758c885STrond Myklebust 346d56b4ddfSMel Gorman nfs_pageio_cond_complete(pgio, page_file_index(page)); 3471b430beeSWu Fengguang ret = nfs_page_async_flush(pgio, page, wbc->sync_mode == WB_SYNC_NONE); 348cfb506e1STrond Myklebust if (ret == -EAGAIN) { 349cfb506e1STrond Myklebust redirty_page_for_writepage(wbc, page); 350cfb506e1STrond Myklebust ret = 0; 351cfb506e1STrond Myklebust } 352cfb506e1STrond Myklebust return ret; 353f758c885STrond Myklebust } 354f758c885STrond Myklebust 355e261f51fSTrond Myklebust /* 3561da177e4SLinus Torvalds * Write an mmapped page to the server. 
3571da177e4SLinus Torvalds */ 3584d770ccfSTrond Myklebust static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc) 3591da177e4SLinus Torvalds { 360f758c885STrond Myklebust struct nfs_pageio_descriptor pgio; 361e261f51fSTrond Myklebust int err; 3621da177e4SLinus Torvalds 363a20c93e3SChristoph Hellwig nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc), 364a20c93e3SChristoph Hellwig false, &nfs_async_write_completion_ops); 365f758c885STrond Myklebust err = nfs_do_writepage(page, wbc, &pgio); 366f758c885STrond Myklebust nfs_pageio_complete(&pgio); 367f758c885STrond Myklebust if (err < 0) 3684d770ccfSTrond Myklebust return err; 369f758c885STrond Myklebust if (pgio.pg_error < 0) 370f758c885STrond Myklebust return pgio.pg_error; 371f758c885STrond Myklebust return 0; 3724d770ccfSTrond Myklebust } 3734d770ccfSTrond Myklebust 3744d770ccfSTrond Myklebust int nfs_writepage(struct page *page, struct writeback_control *wbc) 3754d770ccfSTrond Myklebust { 376f758c885STrond Myklebust int ret; 3774d770ccfSTrond Myklebust 378f758c885STrond Myklebust ret = nfs_writepage_locked(page, wbc); 3791da177e4SLinus Torvalds unlock_page(page); 380f758c885STrond Myklebust return ret; 381f758c885STrond Myklebust } 382f758c885STrond Myklebust 383f758c885STrond Myklebust static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data) 384f758c885STrond Myklebust { 385f758c885STrond Myklebust int ret; 386f758c885STrond Myklebust 387f758c885STrond Myklebust ret = nfs_do_writepage(page, wbc, data); 388f758c885STrond Myklebust unlock_page(page); 389f758c885STrond Myklebust return ret; 3901da177e4SLinus Torvalds } 3911da177e4SLinus Torvalds 3921da177e4SLinus Torvalds int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) 3931da177e4SLinus Torvalds { 3941da177e4SLinus Torvalds struct inode *inode = mapping->host; 39572cb77f4STrond Myklebust unsigned long *bitlock = &NFS_I(inode)->flags; 396c63c7b05STrond 
Myklebust struct nfs_pageio_descriptor pgio; 3971da177e4SLinus Torvalds int err; 3981da177e4SLinus Torvalds 39972cb77f4STrond Myklebust /* Stop dirtying of new pages while we sync */ 40072cb77f4STrond Myklebust err = wait_on_bit_lock(bitlock, NFS_INO_FLUSHING, 40172cb77f4STrond Myklebust nfs_wait_bit_killable, TASK_KILLABLE); 40272cb77f4STrond Myklebust if (err) 40372cb77f4STrond Myklebust goto out_err; 40472cb77f4STrond Myklebust 40591d5b470SChuck Lever nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); 40691d5b470SChuck Lever 407a20c93e3SChristoph Hellwig nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false, 408a20c93e3SChristoph Hellwig &nfs_async_write_completion_ops); 409f758c885STrond Myklebust err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); 410c63c7b05STrond Myklebust nfs_pageio_complete(&pgio); 41172cb77f4STrond Myklebust 41272cb77f4STrond Myklebust clear_bit_unlock(NFS_INO_FLUSHING, bitlock); 4134e857c58SPeter Zijlstra smp_mb__after_atomic(); 41472cb77f4STrond Myklebust wake_up_bit(bitlock, NFS_INO_FLUSHING); 41572cb77f4STrond Myklebust 416f758c885STrond Myklebust if (err < 0) 41772cb77f4STrond Myklebust goto out_err; 41872cb77f4STrond Myklebust err = pgio.pg_error; 41972cb77f4STrond Myklebust if (err < 0) 42072cb77f4STrond Myklebust goto out_err; 421c63c7b05STrond Myklebust return 0; 42272cb77f4STrond Myklebust out_err: 42372cb77f4STrond Myklebust return err; 4241da177e4SLinus Torvalds } 4251da177e4SLinus Torvalds 4261da177e4SLinus Torvalds /* 4271da177e4SLinus Torvalds * Insert a write request into an inode 4281da177e4SLinus Torvalds */ 429d6d6dc7cSFred Isaman static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req) 4301da177e4SLinus Torvalds { 4311da177e4SLinus Torvalds struct nfs_inode *nfsi = NFS_I(inode); 432e7d39069STrond Myklebust 4332bfc6e56SWeston Andros Adamson WARN_ON_ONCE(req->wb_this_page != req); 4342bfc6e56SWeston Andros Adamson 435e7d39069STrond Myklebust /* Lock the request! 
*/ 4367ad84aa9STrond Myklebust nfs_lock_request(req); 437e7d39069STrond Myklebust 438e7d39069STrond Myklebust spin_lock(&inode->i_lock); 439011e2a7fSBryan Schumaker if (!nfsi->npages && NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) 440a9a4a87aSTrond Myklebust inode->i_version++; 44129418aa4SMel Gorman /* 44229418aa4SMel Gorman * Swap-space should not get truncated. Hence no need to plug the race 44329418aa4SMel Gorman * with invalidate/truncate. 44429418aa4SMel Gorman */ 44529418aa4SMel Gorman if (likely(!PageSwapCache(req->wb_page))) { 4462df485a7STrond Myklebust set_bit(PG_MAPPED, &req->wb_flags); 447deb7d638STrond Myklebust SetPagePrivate(req->wb_page); 448277459d2STrond Myklebust set_page_private(req->wb_page, (unsigned long)req); 44929418aa4SMel Gorman } 4501da177e4SLinus Torvalds nfsi->npages++; 451*17089a29SWeston Andros Adamson /* this a head request for a page group - mark it as having an 452*17089a29SWeston Andros Adamson * extra reference so sub groups can follow suit */ 453*17089a29SWeston Andros Adamson WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); 454c03b4024STrond Myklebust kref_get(&req->wb_kref); 455e7d39069STrond Myklebust spin_unlock(&inode->i_lock); 4561da177e4SLinus Torvalds } 4571da177e4SLinus Torvalds 4581da177e4SLinus Torvalds /* 45989a09141SPeter Zijlstra * Remove a write request from an inode 4601da177e4SLinus Torvalds */ 4611da177e4SLinus Torvalds static void nfs_inode_remove_request(struct nfs_page *req) 4621da177e4SLinus Torvalds { 4633d4ff43dSAl Viro struct inode *inode = req->wb_context->dentry->d_inode; 4641da177e4SLinus Torvalds struct nfs_inode *nfsi = NFS_I(inode); 46520633f04SWeston Andros Adamson struct nfs_page *head; 46620633f04SWeston Andros Adamson 46720633f04SWeston Andros Adamson if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) { 46820633f04SWeston Andros Adamson head = req->wb_head; 4691da177e4SLinus Torvalds 470587142f8STrond Myklebust spin_lock(&inode->i_lock); 47120633f04SWeston Andros Adamson if 
(likely(!PageSwapCache(head->wb_page))) { 47220633f04SWeston Andros Adamson set_page_private(head->wb_page, 0); 47320633f04SWeston Andros Adamson ClearPagePrivate(head->wb_page); 47420633f04SWeston Andros Adamson clear_bit(PG_MAPPED, &head->wb_flags); 47529418aa4SMel Gorman } 4761da177e4SLinus Torvalds nfsi->npages--; 477587142f8STrond Myklebust spin_unlock(&inode->i_lock); 47820633f04SWeston Andros Adamson } 479*17089a29SWeston Andros Adamson 480*17089a29SWeston Andros Adamson if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) 4811da177e4SLinus Torvalds nfs_release_request(req); 4821da177e4SLinus Torvalds } 4831da177e4SLinus Torvalds 48461822ab5STrond Myklebust static void 4856d884e8fSFred nfs_mark_request_dirty(struct nfs_page *req) 48661822ab5STrond Myklebust { 48761822ab5STrond Myklebust __set_page_dirty_nobuffers(req->wb_page); 48861822ab5STrond Myklebust } 48961822ab5STrond Myklebust 49089d77c8fSBryan Schumaker #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 4918dd37758STrond Myklebust /** 4928dd37758STrond Myklebust * nfs_request_add_commit_list - add request to a commit list 4938dd37758STrond Myklebust * @req: pointer to a struct nfs_page 494ea2cf228SFred Isaman * @dst: commit list head 495ea2cf228SFred Isaman * @cinfo: holds list lock and accounting info 4968dd37758STrond Myklebust * 497ea2cf228SFred Isaman * This sets the PG_CLEAN bit, updates the cinfo count of 4988dd37758STrond Myklebust * number of outstanding requests requiring a commit as well as 4998dd37758STrond Myklebust * the MM page stats. 5008dd37758STrond Myklebust * 501ea2cf228SFred Isaman * The caller must _not_ hold the cinfo->lock, but must be 5028dd37758STrond Myklebust * holding the nfs_page lock. 
5038dd37758STrond Myklebust */ 5048dd37758STrond Myklebust void 505ea2cf228SFred Isaman nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst, 506ea2cf228SFred Isaman struct nfs_commit_info *cinfo) 5078dd37758STrond Myklebust { 5088dd37758STrond Myklebust set_bit(PG_CLEAN, &(req)->wb_flags); 509ea2cf228SFred Isaman spin_lock(cinfo->lock); 510ea2cf228SFred Isaman nfs_list_add_request(req, dst); 511ea2cf228SFred Isaman cinfo->mds->ncommit++; 512ea2cf228SFred Isaman spin_unlock(cinfo->lock); 51356f9cd68SFred Isaman if (!cinfo->dreq) { 5148dd37758STrond Myklebust inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 515d56b4ddfSMel Gorman inc_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info, 51656f9cd68SFred Isaman BDI_RECLAIMABLE); 51756f9cd68SFred Isaman __mark_inode_dirty(req->wb_context->dentry->d_inode, 51856f9cd68SFred Isaman I_DIRTY_DATASYNC); 51956f9cd68SFred Isaman } 5208dd37758STrond Myklebust } 5218dd37758STrond Myklebust EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); 5228dd37758STrond Myklebust 5238dd37758STrond Myklebust /** 5248dd37758STrond Myklebust * nfs_request_remove_commit_list - Remove request from a commit list 5258dd37758STrond Myklebust * @req: pointer to a nfs_page 526ea2cf228SFred Isaman * @cinfo: holds list lock and accounting info 5278dd37758STrond Myklebust * 528ea2cf228SFred Isaman * This clears the PG_CLEAN bit, and updates the cinfo's count of 5298dd37758STrond Myklebust * number of outstanding requests requiring a commit 5308dd37758STrond Myklebust * It does not update the MM page stats. 5318dd37758STrond Myklebust * 532ea2cf228SFred Isaman * The caller _must_ hold the cinfo->lock and the nfs_page lock. 
5338dd37758STrond Myklebust */ 5348dd37758STrond Myklebust void 535ea2cf228SFred Isaman nfs_request_remove_commit_list(struct nfs_page *req, 536ea2cf228SFred Isaman struct nfs_commit_info *cinfo) 5378dd37758STrond Myklebust { 5388dd37758STrond Myklebust if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) 5398dd37758STrond Myklebust return; 5408dd37758STrond Myklebust nfs_list_remove_request(req); 541ea2cf228SFred Isaman cinfo->mds->ncommit--; 5428dd37758STrond Myklebust } 5438dd37758STrond Myklebust EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list); 5448dd37758STrond Myklebust 545ea2cf228SFred Isaman static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 546ea2cf228SFred Isaman struct inode *inode) 547ea2cf228SFred Isaman { 548ea2cf228SFred Isaman cinfo->lock = &inode->i_lock; 549ea2cf228SFred Isaman cinfo->mds = &NFS_I(inode)->commit_info; 550ea2cf228SFred Isaman cinfo->ds = pnfs_get_ds_info(inode); 551b359f9d0SFred Isaman cinfo->dreq = NULL; 552f453a54aSFred Isaman cinfo->completion_ops = &nfs_commit_completion_ops; 553ea2cf228SFred Isaman } 554ea2cf228SFred Isaman 555ea2cf228SFred Isaman void nfs_init_cinfo(struct nfs_commit_info *cinfo, 556ea2cf228SFred Isaman struct inode *inode, 557ea2cf228SFred Isaman struct nfs_direct_req *dreq) 558ea2cf228SFred Isaman { 5591763da12SFred Isaman if (dreq) 5601763da12SFred Isaman nfs_init_cinfo_from_dreq(cinfo, dreq); 5611763da12SFred Isaman else 562ea2cf228SFred Isaman nfs_init_cinfo_from_inode(cinfo, inode); 563ea2cf228SFred Isaman } 564ea2cf228SFred Isaman EXPORT_SYMBOL_GPL(nfs_init_cinfo); 5658dd37758STrond Myklebust 5661da177e4SLinus Torvalds /* 5671da177e4SLinus Torvalds * Add a request to the inode's commit list. 
5681da177e4SLinus Torvalds */ 5691763da12SFred Isaman void 570ea2cf228SFred Isaman nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, 571ea2cf228SFred Isaman struct nfs_commit_info *cinfo) 5721da177e4SLinus Torvalds { 573ea2cf228SFred Isaman if (pnfs_mark_request_commit(req, lseg, cinfo)) 5748dd37758STrond Myklebust return; 575ea2cf228SFred Isaman nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo); 5761da177e4SLinus Torvalds } 5778e821cadSTrond Myklebust 578d6d6dc7cSFred Isaman static void 579d6d6dc7cSFred Isaman nfs_clear_page_commit(struct page *page) 580e468bae9STrond Myklebust { 581e468bae9STrond Myklebust dec_zone_page_state(page, NR_UNSTABLE_NFS); 582d56b4ddfSMel Gorman dec_bdi_stat(page_file_mapping(page)->backing_dev_info, BDI_RECLAIMABLE); 583e468bae9STrond Myklebust } 584d6d6dc7cSFred Isaman 5858dd37758STrond Myklebust static void 586d6d6dc7cSFred Isaman nfs_clear_request_commit(struct nfs_page *req) 587d6d6dc7cSFred Isaman { 5888dd37758STrond Myklebust if (test_bit(PG_CLEAN, &req->wb_flags)) { 5898dd37758STrond Myklebust struct inode *inode = req->wb_context->dentry->d_inode; 590ea2cf228SFred Isaman struct nfs_commit_info cinfo; 591d6d6dc7cSFred Isaman 592ea2cf228SFred Isaman nfs_init_cinfo_from_inode(&cinfo, inode); 593ea2cf228SFred Isaman if (!pnfs_clear_request_commit(req, &cinfo)) { 594ea2cf228SFred Isaman spin_lock(cinfo.lock); 595ea2cf228SFred Isaman nfs_request_remove_commit_list(req, &cinfo); 596ea2cf228SFred Isaman spin_unlock(cinfo.lock); 597d6d6dc7cSFred Isaman } 5988dd37758STrond Myklebust nfs_clear_page_commit(req->wb_page); 5998dd37758STrond Myklebust } 600e468bae9STrond Myklebust } 601e468bae9STrond Myklebust 6028e821cadSTrond Myklebust static inline 6039c7e1b3dSAnna Schumaker int nfs_write_need_commit(struct nfs_pgio_data *data) 6048e821cadSTrond Myklebust { 605465d5243SFred Isaman if (data->verf.committed == NFS_DATA_SYNC) 606cd841605SFred Isaman return data->header->lseg == NULL; 6078e821cadSTrond 
Myklebust return data->verf.committed != NFS_FILE_SYNC; 6088e821cadSTrond Myklebust } 6098e821cadSTrond Myklebust 6108e821cadSTrond Myklebust #else 61168cd6fa4SBryan Schumaker static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 61268cd6fa4SBryan Schumaker struct inode *inode) 61368cd6fa4SBryan Schumaker { 61468cd6fa4SBryan Schumaker } 61568cd6fa4SBryan Schumaker 61668cd6fa4SBryan Schumaker void nfs_init_cinfo(struct nfs_commit_info *cinfo, 61768cd6fa4SBryan Schumaker struct inode *inode, 61868cd6fa4SBryan Schumaker struct nfs_direct_req *dreq) 61968cd6fa4SBryan Schumaker { 62068cd6fa4SBryan Schumaker } 62168cd6fa4SBryan Schumaker 6221763da12SFred Isaman void 623ea2cf228SFred Isaman nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, 624ea2cf228SFred Isaman struct nfs_commit_info *cinfo) 6258e821cadSTrond Myklebust { 6268e821cadSTrond Myklebust } 6278e821cadSTrond Myklebust 6288dd37758STrond Myklebust static void 629e468bae9STrond Myklebust nfs_clear_request_commit(struct nfs_page *req) 630e468bae9STrond Myklebust { 631e468bae9STrond Myklebust } 632e468bae9STrond Myklebust 6338e821cadSTrond Myklebust static inline 6349c7e1b3dSAnna Schumaker int nfs_write_need_commit(struct nfs_pgio_data *data) 6358e821cadSTrond Myklebust { 6368e821cadSTrond Myklebust return 0; 6378e821cadSTrond Myklebust } 6388e821cadSTrond Myklebust 6391da177e4SLinus Torvalds #endif 6401da177e4SLinus Torvalds 641061ae2edSFred Isaman static void nfs_write_completion(struct nfs_pgio_header *hdr) 6426c75dc0dSFred Isaman { 643ea2cf228SFred Isaman struct nfs_commit_info cinfo; 6446c75dc0dSFred Isaman unsigned long bytes = 0; 6452bfc6e56SWeston Andros Adamson bool do_destroy; 6466c75dc0dSFred Isaman 6476c75dc0dSFred Isaman if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 6486c75dc0dSFred Isaman goto out; 649ea2cf228SFred Isaman nfs_init_cinfo_from_inode(&cinfo, hdr->inode); 6506c75dc0dSFred Isaman while (!list_empty(&hdr->pages)) { 6516c75dc0dSFred Isaman struct 
nfs_page *req = nfs_list_entry(hdr->pages.next); 6526c75dc0dSFred Isaman 6536c75dc0dSFred Isaman bytes += req->wb_bytes; 6546c75dc0dSFred Isaman nfs_list_remove_request(req); 6556c75dc0dSFred Isaman if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && 6566c75dc0dSFred Isaman (hdr->good_bytes < bytes)) { 657d1182b33STrond Myklebust nfs_set_pageerror(req->wb_page); 6586c75dc0dSFred Isaman nfs_context_set_write_error(req->wb_context, hdr->error); 6596c75dc0dSFred Isaman goto remove_req; 6606c75dc0dSFred Isaman } 6616c75dc0dSFred Isaman if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) { 6626c75dc0dSFred Isaman nfs_mark_request_dirty(req); 6636c75dc0dSFred Isaman goto next; 6646c75dc0dSFred Isaman } 6656c75dc0dSFred Isaman if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { 666f79d06f5SAnna Schumaker memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); 667ea2cf228SFred Isaman nfs_mark_request_commit(req, hdr->lseg, &cinfo); 6686c75dc0dSFred Isaman goto next; 6696c75dc0dSFred Isaman } 6706c75dc0dSFred Isaman remove_req: 6716c75dc0dSFred Isaman nfs_inode_remove_request(req); 6726c75dc0dSFred Isaman next: 6731d1afcbcSTrond Myklebust nfs_unlock_request(req); 67420633f04SWeston Andros Adamson nfs_end_page_writeback(req); 6752bfc6e56SWeston Andros Adamson do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags); 6763aff4ebbSTrond Myklebust nfs_release_request(req); 6776c75dc0dSFred Isaman } 6786c75dc0dSFred Isaman out: 6796c75dc0dSFred Isaman hdr->release(hdr); 6806c75dc0dSFred Isaman } 6816c75dc0dSFred Isaman 68289d77c8fSBryan Schumaker #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 683ce59515cSAnna Schumaker unsigned long 684ea2cf228SFred Isaman nfs_reqs_to_commit(struct nfs_commit_info *cinfo) 685fb8a1f11STrond Myklebust { 686ea2cf228SFred Isaman return cinfo->mds->ncommit; 687fb8a1f11STrond Myklebust } 688fb8a1f11STrond Myklebust 689ea2cf228SFred Isaman /* cinfo->lock held by caller */ 6901763da12SFred Isaman int 691ea2cf228SFred Isaman 
nfs_scan_commit_list(struct list_head *src, struct list_head *dst, 692ea2cf228SFred Isaman struct nfs_commit_info *cinfo, int max) 693d6d6dc7cSFred Isaman { 694d6d6dc7cSFred Isaman struct nfs_page *req, *tmp; 695d6d6dc7cSFred Isaman int ret = 0; 696d6d6dc7cSFred Isaman 697d6d6dc7cSFred Isaman list_for_each_entry_safe(req, tmp, src, wb_list) { 6988dd37758STrond Myklebust if (!nfs_lock_request(req)) 6998dd37758STrond Myklebust continue; 7007ad84aa9STrond Myklebust kref_get(&req->wb_kref); 701ea2cf228SFred Isaman if (cond_resched_lock(cinfo->lock)) 7023b3be88dSTrond Myklebust list_safe_reset_next(req, tmp, wb_list); 703ea2cf228SFred Isaman nfs_request_remove_commit_list(req, cinfo); 7048dd37758STrond Myklebust nfs_list_add_request(req, dst); 705d6d6dc7cSFred Isaman ret++; 7061763da12SFred Isaman if ((ret == max) && !cinfo->dreq) 707d6d6dc7cSFred Isaman break; 708d6d6dc7cSFred Isaman } 709d6d6dc7cSFred Isaman return ret; 710d6d6dc7cSFred Isaman } 711d6d6dc7cSFred Isaman 7121da177e4SLinus Torvalds /* 7131da177e4SLinus Torvalds * nfs_scan_commit - Scan an inode for commit requests 7141da177e4SLinus Torvalds * @inode: NFS inode to scan 715ea2cf228SFred Isaman * @dst: mds destination list 716ea2cf228SFred Isaman * @cinfo: mds and ds lists of reqs ready to commit 7171da177e4SLinus Torvalds * 7181da177e4SLinus Torvalds * Moves requests from the inode's 'commit' request list. 7191da177e4SLinus Torvalds * The requests are *not* checked to ensure that they form a contiguous set. 
7201da177e4SLinus Torvalds */ 7211763da12SFred Isaman int 722ea2cf228SFred Isaman nfs_scan_commit(struct inode *inode, struct list_head *dst, 723ea2cf228SFred Isaman struct nfs_commit_info *cinfo) 7241da177e4SLinus Torvalds { 725d6d6dc7cSFred Isaman int ret = 0; 726fb8a1f11STrond Myklebust 727ea2cf228SFred Isaman spin_lock(cinfo->lock); 728ea2cf228SFred Isaman if (cinfo->mds->ncommit > 0) { 7298dd37758STrond Myklebust const int max = INT_MAX; 730d6d6dc7cSFred Isaman 731ea2cf228SFred Isaman ret = nfs_scan_commit_list(&cinfo->mds->list, dst, 732ea2cf228SFred Isaman cinfo, max); 733ea2cf228SFred Isaman ret += pnfs_scan_commit_lists(inode, cinfo, max - ret); 734d6d6dc7cSFred Isaman } 735ea2cf228SFred Isaman spin_unlock(cinfo->lock); 736ff778d02STrond Myklebust return ret; 7371da177e4SLinus Torvalds } 738d6d6dc7cSFred Isaman 739c42de9ddSTrond Myklebust #else 740ce59515cSAnna Schumaker unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo) 741fb8a1f11STrond Myklebust { 742fb8a1f11STrond Myklebust return 0; 743fb8a1f11STrond Myklebust } 744fb8a1f11STrond Myklebust 7451763da12SFred Isaman int nfs_scan_commit(struct inode *inode, struct list_head *dst, 746ea2cf228SFred Isaman struct nfs_commit_info *cinfo) 747c42de9ddSTrond Myklebust { 748c42de9ddSTrond Myklebust return 0; 749c42de9ddSTrond Myklebust } 7501da177e4SLinus Torvalds #endif 7511da177e4SLinus Torvalds 7521da177e4SLinus Torvalds /* 753e7d39069STrond Myklebust * Search for an existing write request, and attempt to update 754e7d39069STrond Myklebust * it to reflect a new dirty region on a given page. 7551da177e4SLinus Torvalds * 756e7d39069STrond Myklebust * If the attempt fails, then the existing request is flushed out 757e7d39069STrond Myklebust * to disk. 
7581da177e4SLinus Torvalds */ 759e7d39069STrond Myklebust static struct nfs_page *nfs_try_to_update_request(struct inode *inode, 760e7d39069STrond Myklebust struct page *page, 761e7d39069STrond Myklebust unsigned int offset, 762e7d39069STrond Myklebust unsigned int bytes) 7631da177e4SLinus Torvalds { 764e7d39069STrond Myklebust struct nfs_page *req; 765e7d39069STrond Myklebust unsigned int rqend; 766e7d39069STrond Myklebust unsigned int end; 7671da177e4SLinus Torvalds int error; 768277459d2STrond Myklebust 769e7d39069STrond Myklebust if (!PagePrivate(page)) 770e7d39069STrond Myklebust return NULL; 771e7d39069STrond Myklebust 772e7d39069STrond Myklebust end = offset + bytes; 773e7d39069STrond Myklebust spin_lock(&inode->i_lock); 774e7d39069STrond Myklebust 775e7d39069STrond Myklebust for (;;) { 77629418aa4SMel Gorman req = nfs_page_find_request_locked(NFS_I(inode), page); 777e7d39069STrond Myklebust if (req == NULL) 778e7d39069STrond Myklebust goto out_unlock; 779e7d39069STrond Myklebust 7802bfc6e56SWeston Andros Adamson /* should be handled by nfs_flush_incompatible */ 7812bfc6e56SWeston Andros Adamson WARN_ON_ONCE(req->wb_head != req); 7822bfc6e56SWeston Andros Adamson WARN_ON_ONCE(req->wb_this_page != req); 7832bfc6e56SWeston Andros Adamson 784e7d39069STrond Myklebust rqend = req->wb_offset + req->wb_bytes; 785e7d39069STrond Myklebust /* 786e7d39069STrond Myklebust * Tell the caller to flush out the request if 787e7d39069STrond Myklebust * the offsets are non-contiguous. 788e7d39069STrond Myklebust * Note: nfs_flush_incompatible() will already 789e7d39069STrond Myklebust * have flushed out requests having wrong owners. 
790e7d39069STrond Myklebust */ 791e468bae9STrond Myklebust if (offset > rqend 792e7d39069STrond Myklebust || end < req->wb_offset) 793e7d39069STrond Myklebust goto out_flushme; 794e7d39069STrond Myklebust 7957ad84aa9STrond Myklebust if (nfs_lock_request(req)) 796e7d39069STrond Myklebust break; 797e7d39069STrond Myklebust 798e7d39069STrond Myklebust /* The request is locked, so wait and then retry */ 799587142f8STrond Myklebust spin_unlock(&inode->i_lock); 8001da177e4SLinus Torvalds error = nfs_wait_on_request(req); 8011da177e4SLinus Torvalds nfs_release_request(req); 802e7d39069STrond Myklebust if (error != 0) 803e7d39069STrond Myklebust goto out_err; 804e7d39069STrond Myklebust spin_lock(&inode->i_lock); 8051da177e4SLinus Torvalds } 8061da177e4SLinus Torvalds 8071da177e4SLinus Torvalds /* Okay, the request matches. Update the region */ 8081da177e4SLinus Torvalds if (offset < req->wb_offset) { 8091da177e4SLinus Torvalds req->wb_offset = offset; 8101da177e4SLinus Torvalds req->wb_pgbase = offset; 8111da177e4SLinus Torvalds } 8121da177e4SLinus Torvalds if (end > rqend) 8131da177e4SLinus Torvalds req->wb_bytes = end - req->wb_offset; 814e7d39069STrond Myklebust else 815e7d39069STrond Myklebust req->wb_bytes = rqend - req->wb_offset; 816e7d39069STrond Myklebust out_unlock: 817e7d39069STrond Myklebust spin_unlock(&inode->i_lock); 818ca138f36SFred Isaman if (req) 8198dd37758STrond Myklebust nfs_clear_request_commit(req); 820e7d39069STrond Myklebust return req; 821e7d39069STrond Myklebust out_flushme: 822e7d39069STrond Myklebust spin_unlock(&inode->i_lock); 823e7d39069STrond Myklebust nfs_release_request(req); 824e7d39069STrond Myklebust error = nfs_wb_page(inode, page); 825e7d39069STrond Myklebust out_err: 826e7d39069STrond Myklebust return ERR_PTR(error); 827e7d39069STrond Myklebust } 8281da177e4SLinus Torvalds 829e7d39069STrond Myklebust /* 830e7d39069STrond Myklebust * Try to update an existing write request, or create one if there is none. 
831e7d39069STrond Myklebust * 832e7d39069STrond Myklebust * Note: Should always be called with the Page Lock held to prevent races 833e7d39069STrond Myklebust * if we have to add a new request. Also assumes that the caller has 834e7d39069STrond Myklebust * already called nfs_flush_incompatible() if necessary. 835e7d39069STrond Myklebust */ 836e7d39069STrond Myklebust static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx, 837e7d39069STrond Myklebust struct page *page, unsigned int offset, unsigned int bytes) 838e7d39069STrond Myklebust { 839d56b4ddfSMel Gorman struct inode *inode = page_file_mapping(page)->host; 840e7d39069STrond Myklebust struct nfs_page *req; 841e7d39069STrond Myklebust 842e7d39069STrond Myklebust req = nfs_try_to_update_request(inode, page, offset, bytes); 843e7d39069STrond Myklebust if (req != NULL) 844e7d39069STrond Myklebust goto out; 8452bfc6e56SWeston Andros Adamson req = nfs_create_request(ctx, page, NULL, offset, bytes); 846e7d39069STrond Myklebust if (IS_ERR(req)) 847e7d39069STrond Myklebust goto out; 848d6d6dc7cSFred Isaman nfs_inode_add_request(inode, req); 849efc91ed0STrond Myklebust out: 85061e930a9STrond Myklebust return req; 8511da177e4SLinus Torvalds } 8521da177e4SLinus Torvalds 853e7d39069STrond Myklebust static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page, 854e7d39069STrond Myklebust unsigned int offset, unsigned int count) 855e7d39069STrond Myklebust { 856e7d39069STrond Myklebust struct nfs_page *req; 857e7d39069STrond Myklebust 858e7d39069STrond Myklebust req = nfs_setup_write_request(ctx, page, offset, count); 859e7d39069STrond Myklebust if (IS_ERR(req)) 860e7d39069STrond Myklebust return PTR_ERR(req); 861e7d39069STrond Myklebust /* Update file length */ 862e7d39069STrond Myklebust nfs_grow_file(page, offset, count); 863d72ddcbaSWeston Andros Adamson nfs_mark_uptodate(req); 864a6305ddbSTrond Myklebust nfs_mark_request_dirty(req); 8651d1afcbcSTrond Myklebust 
nfs_unlock_and_release_request(req); 866e7d39069STrond Myklebust return 0; 867e7d39069STrond Myklebust } 868e7d39069STrond Myklebust 8691da177e4SLinus Torvalds int nfs_flush_incompatible(struct file *file, struct page *page) 8701da177e4SLinus Torvalds { 871cd3758e3STrond Myklebust struct nfs_open_context *ctx = nfs_file_open_context(file); 8722a369153STrond Myklebust struct nfs_lock_context *l_ctx; 8731da177e4SLinus Torvalds struct nfs_page *req; 8741a54533eSTrond Myklebust int do_flush, status; 8751da177e4SLinus Torvalds /* 8761da177e4SLinus Torvalds * Look for a request corresponding to this page. If there 8771da177e4SLinus Torvalds * is one, and it belongs to another file, we flush it out 8781da177e4SLinus Torvalds * before we try to copy anything into the page. Do this 8791da177e4SLinus Torvalds * due to the lack of an ACCESS-type call in NFSv2. 8801da177e4SLinus Torvalds * Also do the same if we find a request from an existing 8811da177e4SLinus Torvalds * dropped page. 8821da177e4SLinus Torvalds */ 8831a54533eSTrond Myklebust do { 884277459d2STrond Myklebust req = nfs_page_find_request(page); 8851a54533eSTrond Myklebust if (req == NULL) 8861a54533eSTrond Myklebust return 0; 8872a369153STrond Myklebust l_ctx = req->wb_lock_context; 8882a369153STrond Myklebust do_flush = req->wb_page != page || req->wb_context != ctx; 8892bfc6e56SWeston Andros Adamson /* for now, flush if more than 1 request in page_group */ 8902bfc6e56SWeston Andros Adamson do_flush |= req->wb_this_page != req; 8910f1d2605STrond Myklebust if (l_ctx && ctx->dentry->d_inode->i_flock != NULL) { 8922a369153STrond Myklebust do_flush |= l_ctx->lockowner.l_owner != current->files 8932a369153STrond Myklebust || l_ctx->lockowner.l_pid != current->tgid; 8942a369153STrond Myklebust } 8951da177e4SLinus Torvalds nfs_release_request(req); 8961a54533eSTrond Myklebust if (!do_flush) 8971a54533eSTrond Myklebust return 0; 898d56b4ddfSMel Gorman status = nfs_wb_page(page_file_mapping(page)->host, page); 
8991a54533eSTrond Myklebust } while (status == 0); 9001a54533eSTrond Myklebust return status; 9011da177e4SLinus Torvalds } 9021da177e4SLinus Torvalds 9031da177e4SLinus Torvalds /* 904dc24826bSAndy Adamson * Avoid buffered writes when a open context credential's key would 905dc24826bSAndy Adamson * expire soon. 906dc24826bSAndy Adamson * 907dc24826bSAndy Adamson * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL. 908dc24826bSAndy Adamson * 909dc24826bSAndy Adamson * Return 0 and set a credential flag which triggers the inode to flush 910dc24826bSAndy Adamson * and performs NFS_FILE_SYNC writes if the key will expired within 911dc24826bSAndy Adamson * RPC_KEY_EXPIRE_TIMEO. 912dc24826bSAndy Adamson */ 913dc24826bSAndy Adamson int 914dc24826bSAndy Adamson nfs_key_timeout_notify(struct file *filp, struct inode *inode) 915dc24826bSAndy Adamson { 916dc24826bSAndy Adamson struct nfs_open_context *ctx = nfs_file_open_context(filp); 917dc24826bSAndy Adamson struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth; 918dc24826bSAndy Adamson 919dc24826bSAndy Adamson return rpcauth_key_timeout_notify(auth, ctx->cred); 920dc24826bSAndy Adamson } 921dc24826bSAndy Adamson 922dc24826bSAndy Adamson /* 923dc24826bSAndy Adamson * Test if the open context credential key is marked to expire soon. 924dc24826bSAndy Adamson */ 925dc24826bSAndy Adamson bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx) 926dc24826bSAndy Adamson { 927dc24826bSAndy Adamson return rpcauth_cred_key_to_expire(ctx->cred); 928dc24826bSAndy Adamson } 929dc24826bSAndy Adamson 930dc24826bSAndy Adamson /* 9315d47a356STrond Myklebust * If the page cache is marked as unsafe or invalid, then we can't rely on 9325d47a356STrond Myklebust * the PageUptodate() flag. In this case, we will need to turn off 9335d47a356STrond Myklebust * write optimisations that depend on the page contents being correct. 
9345d47a356STrond Myklebust */ 9358d197a56STrond Myklebust static bool nfs_write_pageuptodate(struct page *page, struct inode *inode) 9365d47a356STrond Myklebust { 937d529ef83SJeff Layton struct nfs_inode *nfsi = NFS_I(inode); 938d529ef83SJeff Layton 9398d197a56STrond Myklebust if (nfs_have_delegated_attributes(inode)) 9408d197a56STrond Myklebust goto out; 94118dd78c4SScott Mayhew if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) 942d529ef83SJeff Layton return false; 9434db72b40SJeff Layton smp_rmb(); 944d529ef83SJeff Layton if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags)) 9458d197a56STrond Myklebust return false; 9468d197a56STrond Myklebust out: 94718dd78c4SScott Mayhew if (nfsi->cache_validity & NFS_INO_INVALID_DATA) 94818dd78c4SScott Mayhew return false; 9498d197a56STrond Myklebust return PageUptodate(page) != 0; 9505d47a356STrond Myklebust } 9515d47a356STrond Myklebust 952c7559663SScott Mayhew /* If we know the page is up to date, and we're not using byte range locks (or 953c7559663SScott Mayhew * if we have the whole file locked for writing), it may be more efficient to 954c7559663SScott Mayhew * extend the write to cover the entire page in order to avoid fragmentation 955c7559663SScott Mayhew * inefficiencies. 956c7559663SScott Mayhew * 957263b4509SScott Mayhew * If the file is opened for synchronous writes then we can just skip the rest 958263b4509SScott Mayhew * of the checks. 
959c7559663SScott Mayhew */ 960c7559663SScott Mayhew static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode) 961c7559663SScott Mayhew { 962c7559663SScott Mayhew if (file->f_flags & O_DSYNC) 963c7559663SScott Mayhew return 0; 964263b4509SScott Mayhew if (!nfs_write_pageuptodate(page, inode)) 965263b4509SScott Mayhew return 0; 966c7559663SScott Mayhew if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) 967c7559663SScott Mayhew return 1; 968263b4509SScott Mayhew if (inode->i_flock == NULL || (inode->i_flock->fl_start == 0 && 969c7559663SScott Mayhew inode->i_flock->fl_end == OFFSET_MAX && 970263b4509SScott Mayhew inode->i_flock->fl_type != F_RDLCK)) 971c7559663SScott Mayhew return 1; 972c7559663SScott Mayhew return 0; 973c7559663SScott Mayhew } 974c7559663SScott Mayhew 9755d47a356STrond Myklebust /* 9761da177e4SLinus Torvalds * Update and possibly write a cached page of an NFS file. 9771da177e4SLinus Torvalds * 9781da177e4SLinus Torvalds * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad 9791da177e4SLinus Torvalds * things with a page scheduled for an RPC call (e.g. invalidate it). 
9801da177e4SLinus Torvalds */ 9811da177e4SLinus Torvalds int nfs_updatepage(struct file *file, struct page *page, 9821da177e4SLinus Torvalds unsigned int offset, unsigned int count) 9831da177e4SLinus Torvalds { 984cd3758e3STrond Myklebust struct nfs_open_context *ctx = nfs_file_open_context(file); 985d56b4ddfSMel Gorman struct inode *inode = page_file_mapping(page)->host; 9861da177e4SLinus Torvalds int status = 0; 9871da177e4SLinus Torvalds 98891d5b470SChuck Lever nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); 98991d5b470SChuck Lever 9906de1472fSAl Viro dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n", 9916de1472fSAl Viro file, count, (long long)(page_file_offset(page) + offset)); 9921da177e4SLinus Torvalds 993c7559663SScott Mayhew if (nfs_can_extend_write(file, page, inode)) { 99449a70f27STrond Myklebust count = max(count + offset, nfs_page_length(page)); 9951da177e4SLinus Torvalds offset = 0; 9961da177e4SLinus Torvalds } 9971da177e4SLinus Torvalds 998e21195a7STrond Myklebust status = nfs_writepage_setup(ctx, page, offset, count); 99903fa9e84STrond Myklebust if (status < 0) 100003fa9e84STrond Myklebust nfs_set_pageerror(page); 100159b7c05fSTrond Myklebust else 100259b7c05fSTrond Myklebust __set_page_dirty_nobuffers(page); 10031da177e4SLinus Torvalds 100448186c7dSChuck Lever dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n", 10051da177e4SLinus Torvalds status, (long long)i_size_read(inode)); 10061da177e4SLinus Torvalds return status; 10071da177e4SLinus Torvalds } 10081da177e4SLinus Torvalds 10093ff7576dSTrond Myklebust static int flush_task_priority(int how) 10101da177e4SLinus Torvalds { 10111da177e4SLinus Torvalds switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { 10121da177e4SLinus Torvalds case FLUSH_HIGHPRI: 10131da177e4SLinus Torvalds return RPC_PRIORITY_HIGH; 10141da177e4SLinus Torvalds case FLUSH_LOWPRI: 10151da177e4SLinus Torvalds return RPC_PRIORITY_LOW; 10161da177e4SLinus Torvalds } 10171da177e4SLinus Torvalds return RPC_PRIORITY_NORMAL; 10181da177e4SLinus 
Torvalds } 10191da177e4SLinus Torvalds 10201ed26f33SAnna Schumaker static void nfs_initiate_write(struct nfs_pgio_data *data, struct rpc_message *msg, 10211ed26f33SAnna Schumaker struct rpc_task_setup *task_setup_data, int how) 10221da177e4SLinus Torvalds { 1023cd841605SFred Isaman struct inode *inode = data->header->inode; 10243ff7576dSTrond Myklebust int priority = flush_task_priority(how); 10251da177e4SLinus Torvalds 10261ed26f33SAnna Schumaker task_setup_data->priority = priority; 10271ed26f33SAnna Schumaker NFS_PROTO(inode)->write_setup(data, msg); 1028d138d5d1SAndy Adamson 10298c21c62cSWeston Andros Adamson nfs4_state_protect_write(NFS_SERVER(inode)->nfs_client, 10301ed26f33SAnna Schumaker &task_setup_data->rpc_client, msg, data); 1031275acaafSTrond Myklebust } 1032275acaafSTrond Myklebust 10336d884e8fSFred /* If a nfs_flush_* function fails, it should remove reqs from @head and 10346d884e8fSFred * call this on each, which will prepare them to be retried on next 10356d884e8fSFred * writeback using standard nfs. 
10366d884e8fSFred */ 10376d884e8fSFred static void nfs_redirty_request(struct nfs_page *req) 10386d884e8fSFred { 10396d884e8fSFred nfs_mark_request_dirty(req); 10401d1afcbcSTrond Myklebust nfs_unlock_request(req); 104120633f04SWeston Andros Adamson nfs_end_page_writeback(req); 10423aff4ebbSTrond Myklebust nfs_release_request(req); 10436d884e8fSFred } 10446d884e8fSFred 1045061ae2edSFred Isaman static void nfs_async_write_error(struct list_head *head) 10466c75dc0dSFred Isaman { 10476c75dc0dSFred Isaman struct nfs_page *req; 10486c75dc0dSFred Isaman 10496c75dc0dSFred Isaman while (!list_empty(head)) { 10506c75dc0dSFred Isaman req = nfs_list_entry(head->next); 10516c75dc0dSFred Isaman nfs_list_remove_request(req); 10526c75dc0dSFred Isaman nfs_redirty_request(req); 10536c75dc0dSFred Isaman } 10546c75dc0dSFred Isaman } 10556c75dc0dSFred Isaman 1056061ae2edSFred Isaman static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = { 1057061ae2edSFred Isaman .error_cleanup = nfs_async_write_error, 1058061ae2edSFred Isaman .completion = nfs_write_completion, 1059061ae2edSFred Isaman }; 1060061ae2edSFred Isaman 106157208fa7SBryan Schumaker void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, 1062a20c93e3SChristoph Hellwig struct inode *inode, int ioflags, bool force_mds, 1063061ae2edSFred Isaman const struct nfs_pgio_completion_ops *compl_ops) 10641751c363STrond Myklebust { 1065a20c93e3SChristoph Hellwig struct nfs_server *server = NFS_SERVER(inode); 106641d8d5b7SAnna Schumaker const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops; 1067a20c93e3SChristoph Hellwig 1068a20c93e3SChristoph Hellwig #ifdef CONFIG_NFS_V4_1 1069a20c93e3SChristoph Hellwig if (server->pnfs_curr_ld && !force_mds) 1070a20c93e3SChristoph Hellwig pg_ops = server->pnfs_curr_ld->pg_write_ops; 1071a20c93e3SChristoph Hellwig #endif 10724a0de55cSAnna Schumaker nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops, 10734a0de55cSAnna Schumaker server->wsize, ioflags); 
10741751c363STrond Myklebust } 1075ddda8e0aSBryan Schumaker EXPORT_SYMBOL_GPL(nfs_pageio_init_write); 10761751c363STrond Myklebust 1077dce81290STrond Myklebust void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) 1078dce81290STrond Myklebust { 107941d8d5b7SAnna Schumaker pgio->pg_ops = &nfs_pgio_rw_ops; 1080dce81290STrond Myklebust pgio->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; 1081dce81290STrond Myklebust } 10821f945357STrond Myklebust EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds); 1083dce81290STrond Myklebust 10841da177e4SLinus Torvalds 10850b7c0153SFred Isaman void nfs_commit_prepare(struct rpc_task *task, void *calldata) 10860b7c0153SFred Isaman { 10870b7c0153SFred Isaman struct nfs_commit_data *data = calldata; 10880b7c0153SFred Isaman 10890b7c0153SFred Isaman NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); 10900b7c0153SFred Isaman } 10910b7c0153SFred Isaman 1092a4cdda59SAnna Schumaker static void nfs_writeback_release_common(struct nfs_pgio_data *data) 10931da177e4SLinus Torvalds { 1094cd841605SFred Isaman struct nfs_pgio_header *hdr = data->header; 1095e2fecb21STrond Myklebust int status = data->task.tk_status; 1096788e7a89STrond Myklebust 10976c75dc0dSFred Isaman if ((status >= 0) && nfs_write_need_commit(data)) { 10986c75dc0dSFred Isaman spin_lock(&hdr->lock); 10996c75dc0dSFred Isaman if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) 11006c75dc0dSFred Isaman ; /* Do nothing */ 11016c75dc0dSFred Isaman else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) 1102f79d06f5SAnna Schumaker memcpy(&hdr->verf, &data->verf, sizeof(hdr->verf)); 1103f79d06f5SAnna Schumaker else if (memcmp(&hdr->verf, &data->verf, sizeof(hdr->verf))) 11046c75dc0dSFred Isaman set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); 11056c75dc0dSFred Isaman spin_unlock(&hdr->lock); 11061da177e4SLinus Torvalds } 11071da177e4SLinus Torvalds } 11081da177e4SLinus Torvalds 11091f2edbe3STrond Myklebust /* 11101f2edbe3STrond Myklebust * Special version of 
should_remove_suid() that ignores capabilities. 11111f2edbe3STrond Myklebust */ 11121f2edbe3STrond Myklebust static int nfs_should_remove_suid(const struct inode *inode) 11131f2edbe3STrond Myklebust { 11141f2edbe3STrond Myklebust umode_t mode = inode->i_mode; 11151f2edbe3STrond Myklebust int kill = 0; 1116788e7a89STrond Myklebust 11171f2edbe3STrond Myklebust /* suid always must be killed */ 11181f2edbe3STrond Myklebust if (unlikely(mode & S_ISUID)) 11191f2edbe3STrond Myklebust kill = ATTR_KILL_SUID; 11201f2edbe3STrond Myklebust 11211f2edbe3STrond Myklebust /* 11221f2edbe3STrond Myklebust * sgid without any exec bits is just a mandatory locking mark; leave 11231f2edbe3STrond Myklebust * it alone. If some exec bits are set, it's a real sgid; kill it. 11241f2edbe3STrond Myklebust */ 11251f2edbe3STrond Myklebust if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) 11261f2edbe3STrond Myklebust kill |= ATTR_KILL_SGID; 11271f2edbe3STrond Myklebust 11281f2edbe3STrond Myklebust if (unlikely(kill && S_ISREG(mode))) 11291f2edbe3STrond Myklebust return kill; 11301f2edbe3STrond Myklebust 11311f2edbe3STrond Myklebust return 0; 11321f2edbe3STrond Myklebust } 1133788e7a89STrond Myklebust 11341da177e4SLinus Torvalds /* 11351da177e4SLinus Torvalds * This function is called when the WRITE call is complete. 11361da177e4SLinus Torvalds */ 11370eecb214SAnna Schumaker static int nfs_writeback_done(struct rpc_task *task, struct nfs_pgio_data *data, 11380eecb214SAnna Schumaker struct inode *inode) 11391da177e4SLinus Torvalds { 1140788e7a89STrond Myklebust int status; 11411da177e4SLinus Torvalds 1142f551e44fSChuck Lever /* 1143f551e44fSChuck Lever * ->write_done will attempt to use post-op attributes to detect 1144f551e44fSChuck Lever * conflicting writes by other clients. 
A strict interpretation 1145f551e44fSChuck Lever * of close-to-open would allow us to continue caching even if 1146f551e44fSChuck Lever * another writer had changed the file, but some applications 1147f551e44fSChuck Lever * depend on tighter cache coherency when writing. 1148f551e44fSChuck Lever */ 1149cd841605SFred Isaman status = NFS_PROTO(inode)->write_done(task, data); 1150788e7a89STrond Myklebust if (status != 0) 11510eecb214SAnna Schumaker return status; 11520eecb214SAnna Schumaker nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, data->res.count); 115391d5b470SChuck Lever 115489d77c8fSBryan Schumaker #if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4) 11550eecb214SAnna Schumaker if (data->res.verf->committed < data->args.stable && task->tk_status >= 0) { 11561da177e4SLinus Torvalds /* We tried a write call, but the server did not 11571da177e4SLinus Torvalds * commit data to stable storage even though we 11581da177e4SLinus Torvalds * requested it. 11591da177e4SLinus Torvalds * Note: There is a known bug in Tru64 < 5.0 in which 11601da177e4SLinus Torvalds * the server reports NFS_DATA_SYNC, but performs 11611da177e4SLinus Torvalds * NFS_FILE_SYNC. We therefore implement this checking 11621da177e4SLinus Torvalds * as a dprintk() in order to avoid filling syslog. 
11631da177e4SLinus Torvalds */ 11641da177e4SLinus Torvalds static unsigned long complain; 11651da177e4SLinus Torvalds 1166a69aef14SFred Isaman /* Note this will print the MDS for a DS write */ 11671da177e4SLinus Torvalds if (time_before(complain, jiffies)) { 11681da177e4SLinus Torvalds dprintk("NFS: faulty NFS server %s:" 11691da177e4SLinus Torvalds " (committed = %d) != (stable = %d)\n", 1170cd841605SFred Isaman NFS_SERVER(inode)->nfs_client->cl_hostname, 11710eecb214SAnna Schumaker data->res.verf->committed, data->args.stable); 11721da177e4SLinus Torvalds complain = jiffies + 300 * HZ; 11731da177e4SLinus Torvalds } 11741da177e4SLinus Torvalds } 11751da177e4SLinus Torvalds #endif 11761f2edbe3STrond Myklebust 11771f2edbe3STrond Myklebust /* Deal with the suid/sgid bit corner case */ 11781f2edbe3STrond Myklebust if (nfs_should_remove_suid(inode)) 11791f2edbe3STrond Myklebust nfs_mark_for_revalidate(inode); 11800eecb214SAnna Schumaker return 0; 11810eecb214SAnna Schumaker } 11820eecb214SAnna Schumaker 11830eecb214SAnna Schumaker /* 11840eecb214SAnna Schumaker * This function is called when the WRITE call is complete. 11850eecb214SAnna Schumaker */ 11860eecb214SAnna Schumaker static void nfs_writeback_result(struct rpc_task *task, struct nfs_pgio_data *data) 11870eecb214SAnna Schumaker { 11880eecb214SAnna Schumaker struct nfs_pgio_args *argp = &data->args; 11890eecb214SAnna Schumaker struct nfs_pgio_res *resp = &data->res; 11901f2edbe3STrond Myklebust 11911f2edbe3STrond Myklebust if (resp->count < argp->count) { 11921da177e4SLinus Torvalds static unsigned long complain; 11931da177e4SLinus Torvalds 11946c75dc0dSFred Isaman /* This a short write! */ 11950eecb214SAnna Schumaker nfs_inc_stats(data->header->inode, NFSIOS_SHORTWRITE); 119691d5b470SChuck Lever 11971da177e4SLinus Torvalds /* Has the server at least made some progress? 
*/ 11986c75dc0dSFred Isaman if (resp->count == 0) { 11996c75dc0dSFred Isaman if (time_before(complain, jiffies)) { 12006c75dc0dSFred Isaman printk(KERN_WARNING 12016c75dc0dSFred Isaman "NFS: Server wrote zero bytes, expected %u.\n", 12026c75dc0dSFred Isaman argp->count); 12036c75dc0dSFred Isaman complain = jiffies + 300 * HZ; 12046c75dc0dSFred Isaman } 12056c75dc0dSFred Isaman nfs_set_pgio_error(data->header, -EIO, argp->offset); 12066c75dc0dSFred Isaman task->tk_status = -EIO; 12076c75dc0dSFred Isaman return; 12086c75dc0dSFred Isaman } 12091da177e4SLinus Torvalds /* Was this an NFSv2 write or an NFSv3 stable write? */ 12101da177e4SLinus Torvalds if (resp->verf->committed != NFS_UNSTABLE) { 12111da177e4SLinus Torvalds /* Resend from where the server left off */ 1212a69aef14SFred Isaman data->mds_offset += resp->count; 12131da177e4SLinus Torvalds argp->offset += resp->count; 12141da177e4SLinus Torvalds argp->pgbase += resp->count; 12151da177e4SLinus Torvalds argp->count -= resp->count; 12161da177e4SLinus Torvalds } else { 12171da177e4SLinus Torvalds /* Resend as a stable write in order to avoid 12181da177e4SLinus Torvalds * headaches in the case of a server crash. 
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		rpc_restart_call_prepare(task);
	}
}

#if IS_ENABLED(CONFIG_NFS_V3) || IS_ENABLED(CONFIG_NFS_V4)
/*
 * Try to take the per-inode COMMIT lock (the NFS_INO_COMMIT flag bit).
 *
 * Returns 1 if the bit was acquired, 0 if it is held elsewhere and
 * @may_wait is false, and a negative errno if a killable wait for the
 * bit was interrupted.
 */
static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
{
	int ret;

	if (!test_and_set_bit(NFS_INO_COMMIT, &nfsi->flags))
		return 1;
	if (!may_wait)
		return 0;
	ret = out_of_line_wait_on_bit_lock(&nfsi->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
	return (ret < 0) ? ret : 1;
}

/*
 * Release the per-inode COMMIT lock and wake up anybody sleeping on it.
 * The barrier orders the clear_bit() before the waitqueue check done
 * inside wake_up_bit().
 */
static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
{
	clear_bit(NFS_INO_COMMIT, &nfsi->flags);
	smp_mb__after_atomic();
	wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
}

/*
 * Drop the open context reference taken in nfs_init_commit() and free
 * the commit data structure.
 */
void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

/*
 * Fire off the COMMIT RPC as an asynchronous task on @clnt.
 * If @how contains FLUSH_SYNC, wait for the task to complete before
 * returning.  Returns 0 on successful submission or a negative errno
 * if the task could not be started.
 */
int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};
	/* Set up the initial task struct.  */
	NFS_PROTO(data->inode)->commit_setup(data, &msg);

	dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);

	/* NFSv4.1+ may substitute the machine credential for COMMIT */
	nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
		NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

/*
 * Set up the argument/result storage required for the RPC call.
 * Moves the requests from @head onto data->pages and fills in the
 * commit arguments from the first request's open context.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = first->wb_context->dentry->d_inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;
	data->lseg	  = lseg; /* reference transferred */
	data->mds_ops     = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq	  = cinfo->dreq;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->context     = get_nfs_open_context(first->wb_context);
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

/*
 * Put a list of requests back on the commit list so the commit can be
 * retried later.  For non-direct I/O the unstable-page accounting that
 * nfs_mark_request_commit() bumped is backed out again here.
 */
void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo);
		if (!cinfo->dreq) {
			dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
			dec_bdi_stat(page_file_mapping(req->wb_page)->backing_dev_info,
				     BDI_RECLAIMABLE);
		}
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

/*
 * Commit dirty pages: allocate the commit data, splice @head into it
 * and send the COMMIT to the MDS.  On allocation failure the requests
 * are requeued for a later retry and -ENOMEM is returned.
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data	*data;

	data = nfs_commitdata_alloc();

	if (!data)
		goto out_bad;

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
				   how, 0);
 out_bad:
	nfs_retry_commit(head, NULL, cinfo);
	cinfo->completion_ops->error_cleanup(NFS_I(inode));
	return -ENOMEM;
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data	*data = calldata;

        dprintk("NFS: %5u nfs_commit_done (status %d)\n",
                                task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
}

/*
 * Post-process every request attached to a completed COMMIT: on RPC
 * error flag the open context and drop the request; on success compare
 * the server's write verifier against the one stored per request and
 * either retire the request (match) or redirty the page for a resend
 * (mismatch).  Drops the per-inode COMMIT lock once the last
 * outstanding commit RPC has finished.
 */
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	struct nfs_page	*req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		nfs_clear_page_commit(req->wb_page);

		dprintk("NFS:       commit (%s/%llu %d@%lld)",
			req->wb_context->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(req->wb_context->dentry->d_inode),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
			nfs_inode_remove_request(req);
			dprintk(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!memcmp(&req->wb_verf, &data->verf.verifier, sizeof(req->wb_verf))) {
			/* We have a match */
			nfs_inode_remove_request(req);
			dprintk(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk(" mismatch\n");
		nfs_mark_request_dirty(req);
		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	next:
		nfs_unlock_and_release_request(req);
	}
	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
		nfs_commit_clear_lock(NFS_I(data->inode));
}

/*
 * rpc_release callback: run the completion handler for the commit and
 * free the commit data.
 */
static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.error_cleanup = nfs_commit_clear_lock,
};

/*
 * Try a pNFS commit first; fall back to a plain commit to the MDS when
 * the layout driver did not attempt it.
 */
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}

/*
 * Commit all outstanding unstable writes for @inode.  With FLUSH_SYNC
 * in @how the caller sleeps (killably) until the commit completes.
 * Returns the number of requests scanned for commit, or a negative
 * errno.  Paths that cannot guarantee the commit completed re-mark the
 * inode dirty so a later sync retries it.
 */
int nfs_commit_inode(struct inode *inode, int how)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int res;

	res = nfs_commit_set_lock(NFS_I(inode), may_wait);
	if (res <= 0)
		goto out_mark_dirty;
	nfs_init_cinfo_from_inode(&cinfo, inode);
	res = nfs_scan_commit(inode, &head, &cinfo);
	if (res) {
		int error;

		error = nfs_generic_commit_list(inode, &head, how, &cinfo);
		if (error < 0)
			return error;
		if (!may_wait)
			goto out_mark_dirty;
		error = wait_on_bit(&NFS_I(inode)->flags,
				NFS_INO_COMMIT,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (error < 0)
			return error;
	} else
		nfs_commit_clear_lock(NFS_I(inode));
	return res;
	/* Note: If we exit without ensuring that the commit is complete,
	 *	 we must mark the inode as dirty. Otherwise, future calls to
	 *	 sync_inode() with the WB_SYNC_ALL flag set will fail to ensure
	 *	 that the data is on the disk.
	 */
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return res;
}

/*
 * Writeback helper: decide whether to commit now.  A non-blocking
 * (WB_SYNC_NONE) flush is deferred while more than half of the inode's
 * pages are still awaiting commit, and otherwise issues a non-waiting
 * commit; a blocking flush always commits synchronously.  On success
 * the number of committed requests is charged against
 * wbc->nr_to_write.
 */
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	/* no commits means nothing needs to be done */
	if (!nfsi->commit_info.ncommit)
		return ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = nfs_commit_inode(inode, flags);
	if (ret >= 0) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (ret < wbc->nr_to_write)
				wbc->nr_to_write -= ret;
			else
				wbc->nr_to_write = 0;
		}
		return 0;
	}
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
#else
/* Without NFSv3/v4 there is no COMMIT operation: nothing to do. */
static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_control *wbc)
{
	return 0;
}
#endif

/* ->write_inode() hook: commit any outstanding unstable writes. */
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return nfs_commit_unstable_pages(inode, wbc);
}
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = LONG_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
	};
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = sync_inode(inode, &wbc);

	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

/*
 * Cancel any outstanding write request for @page (e.g. on truncate or
 * invalidation): once the request can be locked it is removed without
 * being written back; otherwise wait for it and retry.
 */
int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	for (;;) {
		wait_on_page_writeback(page);
		req = nfs_page_find_request(page);
		if (req == NULL)
			break;
		if (nfs_lock_request(req)) {
			nfs_clear_request_commit(req);
			nfs_inode_remove_request(req);
			/*
			 * In case nfs_inode_remove_request has marked the
			 * page as being dirty
			 */
			cancel_dirty_page(page, PAGE_CACHE_SIZE);
			nfs_unlock_and_release_request(req);
			break;
		}
		ret = nfs_wait_on_request(req);
		nfs_release_request(req);
		if (ret < 0)
			break;
	}
	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 * Loops writing the page while it is redirtied, then commits while
 * PagePrivate indicates an attached request remains.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_CACHE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}

#ifdef CONFIG_MIGRATION
/*
 * Page-migration hook: refuse to migrate pages with an in-flight NFS
 * request or that fscache will not release; otherwise defer to the
 * generic migrate_page().
 */
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif

/*
 * Module init: create the slab caches and mempools used by the write
 * and commit paths, and size nfs_congestion_kb from available memory.
 * On failure everything already created is torn down in reverse order.
 */
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_rw_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

/* Module exit: tear down the pools and caches created above. */
void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_mode		= FMODE_WRITE,
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
	.rw_release		= nfs_writeback_release_common,
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
	.rw_initiate		= nfs_initiate_write,
};