// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

struct nfs_io_completion {
	void (*complete)(void *data);
	void *data;
	struct kref refcount;
};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
{
	struct nfs_commit_data *p;

	if (never_fail)
		p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
	else {
		/* It is OK to do some reclaim, but not safe to wait
		 * for anything to be returned to the pool.
		 * mempool_alloc() cannot handle that particular combination,
		 * so we need two separate attempts.
		 */
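		/*
		 * Illustrative note: GFP_NOWAIT keeps the mempool attempt
		 * non-blocking (no direct reclaim, no waiting for elements
		 * to be returned), while the slab fallback uses GFP_NOIO so
		 * it may reclaim without issuing I/O; __GFP_NORETRY |
		 * __GFP_NOWARN keep a failure cheap and quiet.
		 */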
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		if (!p)
			p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
					     __GFP_NOWARN | __GFP_NORETRY);
		if (!p)
			return NULL;
	}

	memset(p, 0, sizeof(*p));
	INIT_LIST_HEAD(&p->pages);
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_KERNEL);

	memset(p, 0, sizeof(*p));
	p->rw_mode = FMODE_WRITE;
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{
	return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
}

static void nfs_io_completion_init(struct nfs_io_completion *ioc,
		void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	kref_init(&ioc->refcount);
}

static void nfs_io_completion_release(struct kref *kref)
{
	struct nfs_io_completion *ioc = container_of(kref,
			struct nfs_io_completion, refcount);
	ioc->complete(ioc->data);
	kfree(ioc);
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_get(&ioc->refcount);
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_put(&ioc->refcount, nfs_io_completion_release);
}

static struct nfs_page *
nfs_page_private_request(struct page *page)
{
	if (!PagePrivate(page))
		return NULL;
	return (struct nfs_page *)page_private(page);
}

/*
 * nfs_page_find_private_request - find head request associated with @page
 *
 * takes the mapping's private_lock itself, so the caller needs no locks.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_private_request(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);
	struct nfs_page *req;

	if (!PagePrivate(page))
		return NULL;
	spin_lock(&mapping->private_lock);
	req = nfs_page_private_request(page);
	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}
	spin_unlock(&mapping->private_lock);
	return req;
}

static struct nfs_page *
nfs_page_find_swap_request(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req = NULL;

	if (!PageSwapCache(page))
		return NULL;
	mutex_lock(&nfsi->commit_mutex);
	if (PageSwapCache(page)) {
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
			page);
		if (req) {
			WARN_ON_ONCE(req->wb_head != req);
			kref_get(&req->wb_kref);
		}
	}
	mutex_unlock(&nfsi->commit_mutex);
	return req;
}
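/*
 * Illustrative note: for ordinary pagecache pages the head request is
 * cached in page_private() (set up by nfs_inode_add_request()), so the
 * private lookup above is a cheap pointer fetch. Swapcache pages carry no
 * page_private() association and have to be found by scanning the commit
 * lists instead, which is what the swap variant does.
 */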
/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
	struct nfs_page *req;

	req = nfs_page_find_private_request(page);
	if (!req)
		req = nfs_page_find_swap_request(page);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page_file_mapping(page)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (i_size > 0 && page_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
	nfs_zap_mapping(mapping->host, mapping);
}

static void nfs_mapping_set_error(struct page *page, int error)
{
	SetPageError(page);
	mapping_set_error(page_file_mapping(page), error);
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @req - request in the page group
 *
 * Return true if the page group containing @req covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	struct nfs_page *tmp;
	unsigned int pos = 0;
	unsigned int len = nfs_page_length(req->wb_page);

	nfs_page_group_lock(req);

	for (;;) {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (!tmp)
			break;
		pos = tmp->wb_pgbase + tmp->wb_bytes;
	}

	nfs_page_group_unlock(req);
	return pos >= len;
}
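/*
 * Illustrative example: on a 4096-byte page, a group holding requests for
 * [0, 2048) and [2048, 4096) covers the page (pos advances 0 -> 2048 ->
 * 4096 >= len), whereas [0, 2048) plus [3072, 4096) leaves a hole at
 * offset 2048, the search fails there, and the page stays !PG_uptodate.
 */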
/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	if (PageUptodate(req->wb_page))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	SetPageUptodate(req->wb_page);
}

static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

static void nfs_set_page_writeback(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);
	int ret = test_set_page_writeback(page);

	WARN_ON_ONCE(ret != 0);

	if (atomic_long_inc_return(&nfss->writeback) >
			NFS_CONGESTION_ON_THRESH)
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

static void nfs_end_page_writeback(struct nfs_page *req)
{
	struct inode *inode = page_file_mapping(req->wb_page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);
	bool is_done;

	is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
	nfs_unlock_request(req);
	if (!is_done)
		return;

	end_page_writeback(req->wb_page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

/*
 * nfs_unroll_locks - unlock all newly locked reqs on error
 *
 * this is a helper function for nfs_lock_and_join_requests
 *
 * @inode - inode associated with request page group
 * @head - head request of page group, must be holding head lock
 * @req - request that couldn't lock and needs to wait on the req bit lock
 *
 * unlocks and releases every subrequest between @head and @req that was
 * locked during this pass.
 */
static void
nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
		 struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}
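/*
 * Illustrative note: a page group is a circular, singly linked list of
 * nfs_page structures chained through wb_this_page, with every member's
 * wb_head pointing back at the head request:
 *
 *	head -> sub1 -> sub2 -> head	(wb_this_page)
 *
 * The helpers here and below rely on that shape: walks terminate when
 * they come back around to the head, and an unlinked subrequest is made
 * to point at itself.
 */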
/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head,
				 struct inode *inode)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_this_page = subreq;

		clear_bit(PG_REMOVE, &subreq->wb_flags);

		/* Note: races with nfs_page_group_destroy() */
		if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
				nfs_free_request(subreq);
			continue;
		}

		subreq->wb_head = subreq;

		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
			nfs_release_request(subreq);
			atomic_long_dec(&NFS_I(inode)->nrequests);
		}

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_and_release_request(subreq);
	}
}
/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *head, *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int total_bytes;
	int ret;

try_again:
	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
	head = nfs_page_find_head_request(page);
	if (!head)
		return NULL;

	/* lock the page head first in order to avoid an ABBA inefficiency */
	if (!nfs_lock_request(head)) {
		ret = nfs_wait_on_request(head);
		nfs_release_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
		goto try_again;
	}

	/* Ensure that nobody removed the request before we locked it */
	if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
		nfs_unlock_and_release_request(head);
		goto try_again;
	}

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		goto release_request;

	/* lock each request in the page group */
	total_bytes = head->wb_bytes;
	for (subreq = head->wb_this_page; subreq != head;
			subreq = subreq->wb_this_page) {

		if (!kref_get_unless_zero(&subreq->wb_kref)) {
			if (subreq->wb_offset == head->wb_offset + total_bytes)
				total_bytes += subreq->wb_bytes;
			continue;
		}

		while (!nfs_lock_request(subreq)) {
			/*
			 * Unlock page to allow nfs_page_group_sync_on_bit()
			 * to succeed
			 */
			nfs_page_group_unlock(head);
			ret = nfs_wait_on_request(subreq);
			if (!ret)
				ret = nfs_page_group_lock(head);
			if (ret < 0) {
				nfs_unroll_locks(inode, head, subreq);
				nfs_release_request(subreq);
				goto release_request;
			}
		}
		/*
		 * Subrequests are always contiguous, non overlapping
		 * and in order - but may be repeated (mirrored writes).
		 */
		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
			/* keep track of how many bytes this group covers */
			total_bytes += subreq->wb_bytes;
		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
			    ((subreq->wb_offset + subreq->wb_bytes) >
			     (head->wb_offset + total_bytes)))) {
			nfs_page_group_unlock(head);
			nfs_unroll_locks(inode, head, subreq);
			nfs_unlock_and_release_request(subreq);
			ret = -EIO;
			goto release_request;
		}
	}

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;

		/* change head request to cover whole range that
		 * the former page group covered */
		head->wb_bytes = total_bytes;
	}

	/* Postpone destruction of this request */
	if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
		set_bit(PG_INODE_REF, &head->wb_flags);
		kref_get(&head->wb_kref);
		atomic_long_inc(&NFS_I(inode)->nrequests);
	}

	nfs_page_group_unlock(head);

	nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
	/* Did we lose a race with nfs_inode_remove_request()? */
	if (!(PagePrivate(page) || PageSwapCache(page))) {
		nfs_unlock_and_release_request(head);
		return NULL;
	}

	/* still holds ref on head from nfs_page_find_head_request
	 * and still has lock on head from lock loop */
	return head;

release_request:
	nfs_unlock_and_release_request(head);
	return ERR_PTR(ret);
}

static void nfs_write_error(struct nfs_page *req, int error)
{
	nfs_set_pageerror(page_file_mapping(req->wb_page));
	nfs_mapping_set_error(req->wb_page, error);
	nfs_inode_remove_request(req);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(page);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	/* If there is a fatal error that covers this write, just exit */
	ret = pgio->pg_error;
	if (nfs_error_is_fatal_on_server(ret))
		goto out_launder;

	ret = 0;
	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
		if (nfs_error_is_fatal(ret)) {
			if (nfs_error_is_fatal_on_server(ret))
				goto out_launder;
		} else
			ret = -EAGAIN;
		nfs_redirty_request(req);
		pgio->pg_error = 0;
	} else
		nfs_add_stats(page_file_mapping(page)->host,
				NFSIOS_WRITEPAGES, 1);
out:
	return ret;
out_launder:
	nfs_write_error(req, ret);
	return 0;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio)
{
	int ret;

	nfs_pageio_cond_complete(pgio, page_index(page));
	ret = nfs_page_async_flush(pgio, page);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = AOP_WRITEPAGE_ACTIVATE;
	}
	return ret;
}
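/*
 * Illustrative note: -EAGAIN from nfs_page_async_flush() is translated
 * into AOP_WRITEPAGE_ACTIVATE after redirtying the page. That return
 * value tells the VM the page could not be written right now and is
 * handed back still locked, which is why the callers below only call
 * unlock_page() for other return values.
 */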
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page,
				struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = page_file_mapping(page)->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0,
				false, &nfs_async_write_completion_ops);
	err = nfs_do_writepage(page, wbc, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (nfs_error_is_fatal(pgio.pg_error))
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	if (ret != AOP_WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	if (ret != AOP_WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}

static void nfs_io_completion_commit(void *inode)
{
	nfs_commit_inode(inode, 0);
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	struct nfs_io_completion *ioc;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	ioc = nfs_io_completion_alloc(GFP_KERNEL);
	if (ioc)
		nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
				&nfs_async_write_completion_ops);
	pgio.pg_io_completion = ioc;
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	nfs_io_completion_put(ioc);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (nfs_error_is_fatal(err))
		goto out_err;
	return 0;
out_err:
	return err;
}
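/*
 * Illustrative note on the io_completion lifetime above: nfs_writepages()
 * holds the initial kref from nfs_io_completion_init(), every write header
 * takes an extra reference in nfs_async_write_init(), and each one is
 * dropped in nfs_write_completion(). Only when the last reference is put
 * does nfs_io_completion_commit() run nfs_commit_inode(), so the COMMIT
 * is scheduled once all the WRITEs it covers have completed.
 */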
/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct address_space *mapping = page_file_mapping(req->wb_page);
	struct nfs_inode *nfsi = NFS_I(inode);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	spin_lock(&mapping->private_lock);
	if (!nfs_have_writebacks(inode) &&
	    NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		inode_inc_iversion_raw(inode);
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
	spin_unlock(&mapping->private_lock);
	atomic_long_inc(&nfsi->nrequests);
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct address_space *mapping = page_file_mapping(req->wb_page);
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *head;

	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		head = req->wb_head;

		spin_lock(&mapping->private_lock);
		if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
			set_page_private(head->wb_page, 0);
			ClearPagePrivate(head->wb_page);
			clear_bit(PG_MAPPED, &head->wb_flags);
		}
		spin_unlock(&mapping->private_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
		nfs_release_request(req);
		atomic_long_dec(&nfsi->nrequests);
	}
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	if (req->wb_page)
		__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @page.
 * Must be called while holding NFS_I(inode)->commit_mutex.
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page)
{
	struct nfs_page *freq, *t;
	struct nfs_commit_info cinfo;
	struct inode *inode = &nfsi->vfs_inode;

	nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
	if (freq)
		return freq->wb_head;

	/* Linearly search the commit list for the correct request */
	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
		if (freq->wb_page == page)
			return freq->wb_head;
	}

	return NULL;
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	if (req->wb_page)
		nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	atomic_long_dec(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);
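/*
 * Illustrative usage (buffered I/O, so no nfs_direct_req): callers
 * typically stack-allocate the cinfo and let the dreq argument select
 * the flavour, much as nfs_clear_request_commit() below does with
 * nfs_init_cinfo_from_inode():
 *
 *	struct nfs_commit_info cinfo;
 *
 *	nfs_init_cinfo(&cinfo, inode, NULL);
 *	nfs_request_add_commit_list(req, &cinfo);
 */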
/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
	dec_node_page_state(page, NR_UNSTABLE_NFS);
	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
		    WB_RECLAIMABLE);
}

/* Called holding the request lock on @req */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct nfs_open_context *ctx = nfs_req_openctx(req);
		struct inode *inode = d_inode(ctx->dentry);
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		mutex_lock(&NFS_I(inode)->commit_mutex);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			nfs_request_remove_commit_list(req, &cinfo);
		}
		mutex_unlock(&NFS_I(inode)->commit_mutex);
		nfs_clear_page_commit(req->wb_page);
	}
}

int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}
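/*
 * Illustrative note: the stability level returned by the server decides
 * whether a COMMIT is still owed. NFS_UNSTABLE means the data may only
 * sit in the server's cache, NFS_DATA_SYNC means the data (but not
 * necessarily the metadata) is on stable storage, and NFS_FILE_SYNC
 * means everything is durable so no COMMIT is needed. The DATA_SYNC
 * special case above only requires a commit when there is no pNFS
 * layout segment to commit through.
 */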
static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{
	nfs_io_completion_get(hdr->io_completion);
}

static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			nfs_set_pageerror(page_file_mapping(req->wb_page));
			nfs_mapping_set_error(req->wb_page, hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			/* Reset wb_nio, since the write was successful. */
			req->wb_nio = 0;
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_end_page_writeback(req);
		nfs_release_request(req);
	}
out:
	nfs_io_completion_put(hdr->io_completion);
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return atomic_long_read(&cinfo->mds->ncommit);
}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

restart:
	list_for_each_entry_safe(req, tmp, src, wb_list) {
		kref_get(&req->wb_kref);
		if (!nfs_lock_request(req)) {
			int status;

			/* Prevent deadlock with nfs_lock_and_join_requests */
			if (!list_empty(dst)) {
				nfs_release_request(req);
				continue;
			}
			/* Ensure we make progress to prevent livelock */
			mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
			status = nfs_wait_on_request(req);
			nfs_release_request(req);
			mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
			if (status < 0)
				break;
			goto restart;
		}
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	if (!atomic_long_read(&cinfo->mds->ncommit))
		return 0;
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	return ret;
}
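/*
 * Illustrative example for the update path below: if a request already
 * covers bytes [1024, 2048) of a page and the new write is [2048, 3072),
 * the ranges are contiguous, so the request is simply widened to
 * [1024, 3072). A write at [3584, 4096) would leave a gap, so the old
 * request is flushed out first and the caller falls back to creating a
 * fresh one.
 */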
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	end = offset + bytes;

	req = nfs_lock_and_join_requests(page);
	if (IS_ERR_OR_NULL(req))
		return req;

	rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
	if (offset > rqend || end < req->wb_offset)
		goto out_flushme;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
	req->wb_nio = 0;
	return req;
out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	error = nfs_wb_page(inode, page);
	return (error < 0) ? ERR_PTR(error) : NULL;
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, page, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page *req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = file_inode(file)->i_flctx;
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_head_request(page);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page ||
			!nfs_match_open_context(nfs_req_openctx(req), ctx);
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page_file_mapping(page)->host, page);
	} while (status == 0);
	return status;
}
/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Returns 0 and sets a credential flag which triggers the inode to flush
 * and to perform NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);

	if (nfs_ctx_key_to_expire(ctx, inode) &&
	    !ctx->ll_cred)
		/* Already expired! */
		return -EACCES;
	return 0;
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
	struct rpc_cred *cred = ctx->ll_cred;
	struct auth_cred acred = {
		.cred = ctx->cred,
	};

	if (cred && !cred->cr_ops->crmatch(&acred, cred, 0)) {
		put_rpccred(cred);
		ctx->ll_cred = NULL;
		cred = NULL;
	}
	if (!cred)
		cred = auth->au_ops->lookup_cred(auth, &acred, 0);
	if (!cred || IS_ERR(cred))
		return true;
	ctx->ll_cred = cred;
	return !!(cred->cr_ops->crkey_timeout &&
		  cred->cr_ops->crkey_timeout(cred));
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		return false;
	return PageUptodate(page) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			fl->fl_type == F_WRLCK;
}
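/*
 * Illustrative example for the write-extension logic below and in
 * nfs_updatepage(): with a fully cached, up-to-date 4096-byte page and
 * no conflicting byte-range locks, a 512-byte write at offset 1000 is
 * widened to count = max(1000 + 512, 4096) = 4096 with offset = 0,
 * i.e. one aligned whole-page WRITE instead of a small fragment.
 */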
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	int ret;
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;

	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
					fl_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
					fl_list);
		if (fl->fl_type == F_WRLCK)
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct address_space *mapping = page_file_mapping(page);
	struct inode *inode = mapping->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, page, inode)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(mapping);
	else
		__set_page_dirty_nobuffers(page);
out:
	dprintk("NFS: nfs_updatepage returns %d (isize %lld)\n",
		status, (long long)i_size_read(inode));
	return status;
}

static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
	case FLUSH_HIGHPRI:
		return RPC_PRIORITY_HIGH;
	case FLUSH_LOWPRI:
		return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
	trace_nfs_initiate_write(hdr->inode, hdr->io_start, hdr->good_bytes,
				 hdr->args.stable);
}
/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	/* Bump the transmission count */
	req->wb_nio++;
	nfs_mark_request_dirty(req);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		if (nfs_error_is_fatal(error))
			nfs_write_error(req, error);
		else
			nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages, 0);
	filemap_fdatawrite_range(hdr->inode->i_mapping, hdr->args.offset,
			hdr->args.offset + hdr->args.count - 1);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.init_hdr = nfs_async_write_init,
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			   struct inode *inode, int ioflags, bool force_mds,
			   const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}
/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}

static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
		struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
	trace_nfs_writeback_done(inode, task->tk_status,
				 hdr->args.offset, hdr->res.verf);

	if (hdr->res.verf->committed < hdr->args.stable &&
	    task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS: faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname,
				hdr->res.verf->committed, hdr->args.stable);
			complain = jiffies + 300 * HZ;
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode)) {
		spin_lock(&inode->i_lock);
		NFS_I(inode)->cache_validity |= NFS_INO_INVALID_OTHER;
		spin_unlock(&inode->i_lock);
	}
	return 0;
}
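/*
 * Illustrative example of the short-write handling below: for a stable
 * 16384-byte WRITE at offset 0 that the server acknowledges with
 * resp->count == 8192, the request is restarted with offset/pgbase
 * advanced by 8192 and count reduced to 8192. An unstable short write is
 * instead resent in full as NFS_FILE_SYNC to avoid reasoning about which
 * bytes would survive a server crash.
 */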
/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		rpc_restart_call_prepare(task);
	}
}

static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_var_event_killable(&cinfo->rpcs_out,
				       !atomic_read(&cinfo->rpcs_out));
}

static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out))
		wake_up_var(&cinfo->rpcs_out);
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};
	/* Set up the initial task struct.  */
	nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
	trace_nfs_initiate_commit(data);

	dprintk("NFS: initiated commit call\n");

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

static loff_t nfs_get_lwb(struct list_head *head)
{
	loff_t lwb = 0;
	struct nfs_page *req;

	list_for_each_entry(req, head, wb_list)
		if (lwb < (req_offset(req) + req->wb_bytes))
			lwb = req_offset(req) + req->wb_bytes;

	return lwb;
}
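/*
 * Illustrative note for nfs_init_commit() below: a COMMIT with
 * offset == 0 and count == 0 asks the server to flush the whole file to
 * stable storage, which is why the MDS argument setup never computes a
 * byte range; the lwb ("last written byte") value is only needed by
 * pNFS layout drivers.
 */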
/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct nfs_open_context *ctx = nfs_req_openctx(first);
	struct inode *inode = d_inode(ctx->dentry);

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode = inode;
	data->cred = ctx->cred;
	data->lseg = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq = cinfo->dreq;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->context = get_nfs_open_context(ctx);
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		if (!cinfo->dreq)
			nfs_clear_page_commit(req->wb_page);
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void
nfs_commit_resched_write(struct nfs_commit_info *cinfo,
			 struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data;

	/* another commit raced with us */
	if (list_empty(head))
		return 0;

	data = nfs_commitdata_alloc(true);

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how, 0);
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
	trace_nfs_commit_done(data);
}
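/*
 * Illustrative note for the completion handler below: each request
 * carries, in wb_verf, the write verifier returned by its WRITE. If the
 * verifier returned by the COMMIT differs, the server has most likely
 * rebooted (losing its write cache) between the two calls, so the pages
 * are redirtied and resent rather than dropped.
 */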
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	struct nfs_page *req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;
	struct nfs_server *nfss;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (req->wb_page)
			nfs_clear_page_commit(req->wb_page);

		dprintk("NFS: commit (%s/%llu %d@%lld)",
			nfs_req_openctx(req)->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			if (req->wb_page) {
				nfs_mapping_set_error(req->wb_page, status);
				nfs_inode_remove_request(req);
			}
			dprintk_cont(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
			/* We have a match */
			if (req->wb_page)
				nfs_inode_remove_request(req);
			dprintk_cont(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk_cont(" mismatch\n");
		nfs_mark_request_dirty(req);
		set_bit(NFS_CONTEXT_RESEND_WRITES, &nfs_req_openctx(req)->flags);
	next:
		nfs_unlock_and_release_request(req);
		/* Latency breaker */
		cond_resched();
	}
	nfss = NFS_SERVER(data->inode);
	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);

	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	nfs_commit_end(cinfo.mds);
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.resched_write = nfs_commit_resched_write,
};

int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}

static int __nfs_commit_inode(struct inode *inode, int how,
		struct writeback_control *wbc)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int ret, nscan;

	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_commit_begin(cinfo.mds);
	for (;;) {
		ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
		if (ret <= 0)
			break;
		ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
		if (ret < 0)
			break;
		ret = 0;
		if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
			if (nscan < wbc->nr_to_write)
				wbc->nr_to_write -= nscan;
			else
				wbc->nr_to_write = 0;
		}
		if (nscan < INT_MAX)
			break;
		cond_resched();
	}
	nfs_commit_end(cinfo.mds);
	if (ret || !may_wait)
		return ret;
	return wait_on_commit(cinfo.mds);
}

int nfs_commit_inode(struct inode *inode, int how)
{
	return __nfs_commit_inode(inode, how, NULL);
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);
there 1965 * are a lot of outstanding writes for this mapping. 1966 */ 1967 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) 1968 goto out_mark_dirty; 1969 1970 /* don't wait for the COMMIT response */ 1971 flags = 0; 1972 } 1973 1974 ret = __nfs_commit_inode(inode, flags, wbc); 1975 if (!ret) { 1976 if (flags & FLUSH_SYNC) 1977 return 0; 1978 } else if (atomic_long_read(&nfsi->commit_info.ncommit)) 1979 goto out_mark_dirty; 1980 1981 check_requests_outstanding: 1982 if (!atomic_read(&nfsi->commit_info.rpcs_out)) 1983 return ret; 1984 out_mark_dirty: 1985 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1986 return ret; 1987 } 1988 EXPORT_SYMBOL_GPL(nfs_write_inode); 1989 1990 /* 1991 * Wrapper for filemap_write_and_wait_range() 1992 * 1993 * Needed for pNFS in order to ensure data becomes visible to the 1994 * client. 1995 */ 1996 int nfs_filemap_write_and_wait_range(struct address_space *mapping, 1997 loff_t lstart, loff_t lend) 1998 { 1999 int ret; 2000 2001 ret = filemap_write_and_wait_range(mapping, lstart, lend); 2002 if (ret == 0) 2003 ret = pnfs_sync_inode(mapping->host, true); 2004 return ret; 2005 } 2006 EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range); 2007 2008 /* 2009 * flush the inode to disk. 2010 */ 2011 int nfs_wb_all(struct inode *inode) 2012 { 2013 int ret; 2014 2015 trace_nfs_writeback_inode_enter(inode); 2016 2017 ret = filemap_write_and_wait(inode->i_mapping); 2018 if (ret) 2019 goto out; 2020 ret = nfs_commit_inode(inode, FLUSH_SYNC); 2021 if (ret < 0) 2022 goto out; 2023 pnfs_sync_inode(inode, true); 2024 ret = 0; 2025 2026 out: 2027 trace_nfs_writeback_inode_exit(inode, ret); 2028 return ret; 2029 } 2030 EXPORT_SYMBOL_GPL(nfs_wb_all); 2031 2032 int nfs_wb_page_cancel(struct inode *inode, struct page *page) 2033 { 2034 struct nfs_page *req; 2035 int ret = 0; 2036 2037 wait_on_page_writeback(page); 2038 2039 /* blocking call to cancel all requests and join to a single (head) 2040 * request */ 2041 req = nfs_lock_and_join_requests(page); 2042 2043 if (IS_ERR(req)) { 2044 ret = PTR_ERR(req); 2045 } else if (req) { 2046 /* all requests from this page have been cancelled by 2047 * nfs_lock_and_join_requests, so just remove the head 2048 * request from the inode / page_private pointer and 2049 * release it */ 2050 nfs_inode_remove_request(req); 2051 nfs_unlock_and_release_request(req); 2052 } 2053 2054 return ret; 2055 } 2056 2057 /* 2058 * Write back all requests on one page - we do this before reading it. 
/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		     struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif
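/*
 * Worked example (editorial illustration, assuming 4 KiB pages so
 * PAGE_SHIFT == 12) of the congestion scaling in nfs_init_writepagecache()
 * below: with 1 GiB of RAM, totalram_pages() == 262144,
 * int_sqrt(262144) == 512, and (16 * 512) << (12 - 10) == 32768k,
 * matching the "1GB: 32768k" row in the table in that function.
 */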
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_alloc_header = nfs_writehdr_alloc,
	.rw_free_header = nfs_writehdr_free,
	.rw_done = nfs_writeback_done,
	.rw_result = nfs_writeback_result,
	.rw_initiate = nfs_initiate_write,
};
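/*
 * Illustrative sketch, not part of the original file: the write and commit
 * pools above are created and destroyed from the NFS module's init/exit
 * paths (the real callers live in fs/nfs/inode.c), roughly as follows.
 * Note the unwind-on-failure pattern, mirroring the goto chain in
 * nfs_init_writepagecache() itself.
 */
#if 0
static int __init example_init_nfs(void)
{
	int err;

	err = nfs_init_writepagecache();	/* hypothetical caller */
	if (err)
		return err;
	/* ... register other caches, filesystem types, sysctls, etc. ... */
	return 0;
}

static void __exit example_exit_nfs(void)
{
	nfs_destroy_writepagecache();
}
#endif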