/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>

#include <linux/uaccess.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)
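
/*
 * A refcounted write-completion callback: ->complete(data) runs once the
 * last reference taken via nfs_io_completion_get() has been put. See
 * nfs_writepages(), which uses this to kick off a commit of the inode
 * after all of the writes it issued have completed.
 */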
struct nfs_io_completion {
	void (*complete)(void *data);
	void *data;
	struct kref refcount;
};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail)
{
	struct nfs_commit_data *p;

	if (never_fail)
		p = mempool_alloc(nfs_commit_mempool, GFP_NOIO);
	else {
		/* It is OK to do some reclaim, but not safe to wait
		 * for anything to be returned to the pool.
		 * mempool_alloc() cannot handle that particular combination,
		 * so we need two separate attempts.
		 */
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		if (!p)
			p = kmem_cache_alloc(nfs_cdata_cachep, GFP_NOIO |
					     __GFP_NOWARN | __GFP_NORETRY);
		if (!p)
			return NULL;
	}

	memset(p, 0, sizeof(*p));
	INIT_LIST_HEAD(&p->pages);
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOIO);

	memset(p, 0, sizeof(*p));
	p->rw_mode = FMODE_WRITE;
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{
	return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
}

static void nfs_io_completion_init(struct nfs_io_completion *ioc,
				   void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	kref_init(&ioc->refcount);
}

static void nfs_io_completion_release(struct kref *kref)
{
	struct nfs_io_completion *ioc = container_of(kref,
			struct nfs_io_completion, refcount);
	ioc->complete(ioc->data);
	kfree(ioc);
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_get(&ioc->refcount);
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_put(&ioc->refcount, nfs_io_completion_release);
}

static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
{
	ctx->error = error;
	smp_wmb();
	set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
}

static struct nfs_page *
nfs_page_private_request(struct page *page)
{
	if (!PagePrivate(page))
		return NULL;
	return (struct nfs_page *)page_private(page);
}

/*
 * nfs_page_find_private_request - find head request associated with @page
 *
 * takes the mapping's private_lock internally.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *
nfs_page_find_private_request(struct page *page)
{
	struct address_space *mapping = page_file_mapping(page);
	struct nfs_page *req;

	if (!PagePrivate(page))
		return NULL;
	spin_lock(&mapping->private_lock);
	req = nfs_page_private_request(page);
	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}
	spin_unlock(&mapping->private_lock);
	return req;
}

static struct nfs_page *
nfs_page_find_swap_request(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req = NULL;
	if (!PageSwapCache(page))
		return NULL;
	mutex_lock(&nfsi->commit_mutex);
	if (PageSwapCache(page)) {
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
			page);
		if (req) {
			WARN_ON_ONCE(req->wb_head != req);
			kref_get(&req->wb_kref);
		}
	}
	mutex_unlock(&nfsi->commit_mutex);
	return req;
}

/*
 * nfs_page_find_head_request - find head request associated with @page
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_page_find_head_request(struct page *page)
{
	struct nfs_page *req;

	req = nfs_page_find_private_request(page);
	if (!req)
		req = nfs_page_find_swap_request(page);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct page *page, unsigned int offset, unsigned int count)
{
	struct inode *inode = page_file_mapping(page)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = (i_size - 1) >> PAGE_SHIFT;
	if (i_size > 0 && page_index(page) < end_index)
		goto out;
	end = page_file_offset(page) + ((loff_t)offset+count);
	if (i_size >= end)
		goto out;
	i_size_write(inode, end);
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct page *page)
{
	nfs_zap_mapping(page_file_mapping(page)->host, page_file_mapping(page));
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @req - head request of page group
 *
 * Returns true if the page group with head @req covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	struct nfs_page *tmp;
	unsigned int pos = 0;
	unsigned int len = nfs_page_length(req->wb_page);

	nfs_page_group_lock(req);

	for (;;) {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (!tmp)
			break;
		pos = tmp->wb_pgbase + tmp->wb_bytes;
	}

	nfs_page_group_unlock(req);
	return pos >= len;
}
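
/*
 * Illustration of the coverage walk above (offsets are hypothetical): on a
 * 4096-byte page, a group holding requests for [0, 2048) and [2048, 4096)
 * advances pos from 0 to 2048 to 4096, so pos >= len and the page may be
 * marked uptodate. With only [0, 2048) present, the search at pos == 2048
 * finds nothing and the group does not cover the page.
 */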

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	if (PageUptodate(req->wb_page))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	SetPageUptodate(req->wb_page);
}

static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
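
/*
 * Worked example of the thresholds (figures are illustrative): with
 * nfs_congestion_kb = 65536 (64MB) and 4k pages (PAGE_SHIFT == 12), the
 * bdi is marked congested once more than 65536 >> 2 = 16384 pages are in
 * writeback, and uncongested again when the count drops below
 * 16384 - (16384 >> 2) = 12288 pages.
 */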

static void nfs_set_page_writeback(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);
	int ret = test_set_page_writeback(page);

	WARN_ON_ONCE(ret != 0);

	if (atomic_long_inc_return(&nfss->writeback) >
			NFS_CONGESTION_ON_THRESH)
		set_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

static void nfs_end_page_writeback(struct nfs_page *req)
{
	struct inode *inode = page_file_mapping(req->wb_page)->host;
	struct nfs_server *nfss = NFS_SERVER(inode);
	bool is_done;

	is_done = nfs_page_group_sync_on_bit(req, PG_WB_END);
	nfs_unlock_request(req);
	if (!is_done)
		return;

	end_page_writeback(req->wb_page);
	if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(inode_to_bdi(inode), BLK_RW_ASYNC);
}

/*
 * nfs_unroll_locks - unlock all newly locked reqs in a page group
 * @inode - inode associated with request page group
 * @head - head request of page group
 * @req - request that couldn't lock and needs to wait on the req bit lock
 *
 * This is a helper function for nfs_lock_and_join_requests: it releases
 * the lock on each request in the group that was successfully locked
 * before @req during this run.
 */
static void
nfs_unroll_locks(struct inode *inode, struct nfs_page *head,
		 struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head,
				 struct inode *inode)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_this_page = subreq;

		clear_bit(PG_REMOVE, &subreq->wb_flags);

		/* Note: races with nfs_page_group_destroy() */
		if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
				nfs_free_request(subreq);
			continue;
		}

		subreq->wb_head = subreq;

		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
			nfs_release_request(subreq);
			atomic_long_dec(&NFS_I(inode)->nrequests);
		}

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_and_release_request(subreq);
	}
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req and return
 *                              a locked reference, cancelling any pending
 *                              operations for this page.
 *
 * @page - the page used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @page, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *
nfs_lock_and_join_requests(struct page *page)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page *head, *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int total_bytes;
	int ret;

try_again:
	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
	head = nfs_page_find_head_request(page);
	if (!head)
		return NULL;

	/* lock the page head first in order to avoid an ABBA inefficiency */
	if (!nfs_lock_request(head)) {
		ret = nfs_wait_on_request(head);
		nfs_release_request(head);
		if (ret < 0)
			return ERR_PTR(ret);
		goto try_again;
	}

	/* Ensure that nobody removed the request before we locked it */
	if (head != nfs_page_private_request(page) && !PageSwapCache(page)) {
		nfs_unlock_and_release_request(head);
		goto try_again;
	}

	ret = nfs_page_group_lock(head);
	if (ret < 0) {
		nfs_unlock_and_release_request(head);
		return ERR_PTR(ret);
	}

	/* lock each request in the page group */
	total_bytes = head->wb_bytes;
	for (subreq = head->wb_this_page; subreq != head;
			subreq = subreq->wb_this_page) {

		if (!kref_get_unless_zero(&subreq->wb_kref)) {
			if (subreq->wb_offset == head->wb_offset + total_bytes)
				total_bytes += subreq->wb_bytes;
			continue;
		}

		while (!nfs_lock_request(subreq)) {
			/*
			 * Unlock page to allow nfs_page_group_sync_on_bit()
			 * to succeed
			 */
			nfs_page_group_unlock(head);
			ret = nfs_wait_on_request(subreq);
			if (!ret)
				ret = nfs_page_group_lock(head);
			if (ret < 0) {
				nfs_unroll_locks(inode, head, subreq);
				nfs_release_request(subreq);
				nfs_unlock_and_release_request(head);
				return ERR_PTR(ret);
			}
		}
		/*
		 * Subrequests are always contiguous, non overlapping
		 * and in order - but may be repeated (mirrored writes).
		 */
		if (subreq->wb_offset == (head->wb_offset + total_bytes)) {
			/* keep track of how many bytes this group covers */
			total_bytes += subreq->wb_bytes;
		} else if (WARN_ON_ONCE(subreq->wb_offset < head->wb_offset ||
			    ((subreq->wb_offset + subreq->wb_bytes) >
			     (head->wb_offset + total_bytes)))) {
			nfs_page_group_unlock(head);
			nfs_unroll_locks(inode, head, subreq);
			nfs_unlock_and_release_request(subreq);
			nfs_unlock_and_release_request(head);
			return ERR_PTR(-EIO);
		}
	}

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;

		/* change head request to cover whole range that
		 * the former page group covered */
		head->wb_bytes = total_bytes;
	}

	/* Postpone destruction of this request */
	if (test_and_clear_bit(PG_REMOVE, &head->wb_flags)) {
		set_bit(PG_INODE_REF, &head->wb_flags);
		kref_get(&head->wb_kref);
		atomic_long_inc(&NFS_I(inode)->nrequests);
	}

	nfs_page_group_unlock(head);

	nfs_destroy_unlinked_subrequests(destroy_list, head, inode);

	/* Did we lose a race with nfs_inode_remove_request()? */
	if (!(PagePrivate(page) || PageSwapCache(page))) {
		nfs_unlock_and_release_request(head);
		return NULL;
	}

	/* still holds ref on head from nfs_page_find_head_request
	 * and still has lock on head from lock loop */
	return head;
}
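
/*
 * Example of the contiguity accounting in nfs_lock_and_join_requests
 * (byte ranges are hypothetical): a head request covering [0, 1024)
 * followed by subrequests covering [1024, 2048) and [2048, 4096) grows
 * total_bytes from 1024 to 2048 to 4096, and the joined head is left
 * covering the full 4096 bytes. A subrequest starting anywhere other
 * than head->wb_offset + total_bytes trips the WARN_ON_ONCE and the
 * join fails with -EIO.
 */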

static void nfs_write_error_remove_page(struct nfs_page *req)
{
	nfs_end_page_writeback(req);
	generic_error_remove_page(page_file_mapping(req->wb_page),
				  req->wb_page);
	nfs_release_request(req);
}

static bool
nfs_error_is_fatal_on_server(int err)
{
	switch (err) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		return false;
	}
	return nfs_error_is_fatal(err);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
				struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(page);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_set_page_writeback(page);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	ret = 0;
	/* If there is a fatal error that covers this write, just exit */
	if (nfs_error_is_fatal_on_server(req->wb_context->error))
		goto out_launder;

	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
		if (nfs_error_is_fatal(ret)) {
			nfs_context_set_write_error(req->wb_context, ret);
			if (nfs_error_is_fatal_on_server(ret))
				goto out_launder;
		}
		nfs_redirty_request(req);
		ret = -EAGAIN;
	} else
		nfs_add_stats(page_file_mapping(page)->host,
				NFSIOS_WRITEPAGES, 1);
out:
	return ret;
out_launder:
	nfs_write_error_remove_page(req);
	return ret;
}

static int nfs_do_writepage(struct page *page, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio)
{
	int ret;

	nfs_pageio_cond_complete(pgio, page_index(page));
	ret = nfs_page_async_flush(pgio, page);
	if (ret == -EAGAIN) {
		redirty_page_for_writepage(wbc, page);
		ret = 0;
	}
	return ret;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct page *page,
				struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = page_file_mapping(page)->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0,
				false, &nfs_async_write_completion_ops);
	err = nfs_do_writepage(page, wbc, &pgio);
	nfs_pageio_complete(&pgio);
	if (err < 0)
		return err;
	if (pgio.pg_error < 0)
		return pgio.pg_error;
	return 0;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret;

	ret = nfs_writepage_locked(page, wbc);
	unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page, struct writeback_control *wbc, void *data)
{
	int ret;

	ret = nfs_do_writepage(page, wbc, data);
	unlock_page(page);
	return ret;
}

static void nfs_io_completion_commit(void *inode)
{
	nfs_commit_inode(inode, 0);
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_NOFS);
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	if (ioc)
		nfs_io_completion_init(ioc, nfs_io_completion_commit, inode);

	nfs_pageio_init_write(&pgio, inode, wb_priority(wbc), false,
				&nfs_async_write_completion_ops);
	pgio.pg_io_completion = ioc;
	err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
	nfs_pageio_complete(&pgio);
	nfs_io_completion_put(ioc);

	if (err < 0)
		goto out_err;
	err = pgio.pg_error;
	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
{
	struct address_space *mapping = page_file_mapping(req->wb_page);
	struct nfs_inode *nfsi = NFS_I(inode);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	spin_lock(&mapping->private_lock);
	if (!nfs_have_writebacks(inode) &&
	    NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE)) {
		spin_lock(&inode->i_lock);
		inode->i_version++;
		spin_unlock(&inode->i_lock);
	}
	if (likely(!PageSwapCache(req->wb_page))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		SetPagePrivate(req->wb_page);
		set_page_private(req->wb_page, (unsigned long)req);
	}
	spin_unlock(&mapping->private_lock);
	atomic_long_inc(&nfsi->nrequests);
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests.
	 */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct address_space *mapping = page_file_mapping(req->wb_page);
	struct inode *inode = mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *head;

	atomic_long_dec(&nfsi->nrequests);
	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		head = req->wb_head;

		spin_lock(&mapping->private_lock);
		if (likely(head->wb_page && !PageSwapCache(head->wb_page))) {
			set_page_private(head->wb_page, 0);
			ClearPagePrivate(head->wb_page);
			clear_bit(PG_MAPPED, &head->wb_flags);
		}
		spin_unlock(&mapping->private_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
		nfs_release_request(req);
}

static void
nfs_mark_request_dirty(struct nfs_page *req)
{
	if (req->wb_page)
		__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @page.
 * Must be called while holding NFS_I(inode)->commit_mutex.
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct page *page)
{
	struct nfs_page *freq, *t;
	struct nfs_commit_info cinfo;
	struct inode *inode = &nfsi->vfs_inode;

	nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
	freq = pnfs_search_commit_reqs(inode, &cinfo, page);
	if (freq)
		return freq->wb_head;

	/* Linearly search the commit list for the correct request */
	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
		if (freq->wb_page == page)
			return freq->wb_head;
	}

	return NULL;
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold NFS_I(cinfo->inode)->commit_mutex, but must
 * be holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	if (req->wb_page)
		nfs_mark_page_unstable(req->wb_page, cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold NFS_I(cinfo->inode)->commit_mutex and the
 * nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	atomic_long_dec(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}

static void
nfs_clear_page_commit(struct page *page)
{
	dec_node_page_state(page, NR_UNSTABLE_NFS);
	dec_wb_stat(&inode_to_bdi(page_file_mapping(page)->host)->wb,
		    WB_RECLAIMABLE);
}

/* Called holding the request lock on @req */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct inode *inode = d_inode(req->wb_context->dentry);
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		mutex_lock(&NFS_I(inode)->commit_mutex);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			nfs_request_remove_commit_list(req, &cinfo);
		}
		mutex_unlock(&NFS_I(inode)->commit_mutex);
		nfs_clear_page_commit(req->wb_page);
	}
}
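
/*
 * A write needs a follow-up COMMIT unless the server already committed it
 * to stable storage: NFS_FILE_SYNC replies never need one, unstable
 * replies always do, and (on a reading of the test below) NFS_DATA_SYNC
 * replies only need one when the write went through the MDS, i.e. when
 * there is no layout segment.
 */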
int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{
	nfs_io_completion_get(hdr->io_completion);
}

static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			nfs_set_pageerror(req->wb_page);
			nfs_context_set_write_error(req->wb_context, hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
				hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_end_page_writeback(req);
		nfs_release_request(req);
	}
out:
	nfs_io_completion_put(hdr->io_completion);
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return atomic_long_read(&cinfo->mds->ncommit);
}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

restart:
	list_for_each_entry_safe(req, tmp, src, wb_list) {
		kref_get(&req->wb_kref);
		if (!nfs_lock_request(req)) {
			int status;

			/* Prevent deadlock with nfs_lock_and_join_requests */
			if (!list_empty(dst)) {
				nfs_release_request(req);
				continue;
			}
			/* Ensure we make progress to prevent livelock */
			mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
			status = nfs_wait_on_request(req);
			nfs_release_request(req);
			mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
			if (status < 0)
				break;
			goto restart;
		}
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	if (!atomic_long_read(&cinfo->mds->ncommit))
		return 0;
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	return ret;
}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
		struct page *page,
		unsigned int offset,
		unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	end = offset + bytes;

	req = nfs_lock_and_join_requests(page);
	if (IS_ERR_OR_NULL(req))
		return req;

	rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
	if (offset > rqend || end < req->wb_offset)
		goto out_flushme;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
	return req;
out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	error = nfs_wb_page(inode, page);
	return (error < 0) ? ERR_PTR(error) : NULL;
}
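
/*
 * Worked example of the merge above (offsets are hypothetical): with an
 * existing request covering bytes [0, 512) of a page, a new write of
 * [256, 768) is contiguous (256 <= rqend and 768 >= wb_offset), so the
 * request is simply extended to cover [0, 768). A write of [1024, 1280)
 * would leave a hole and takes the out_flushme path instead.
 */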

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page * nfs_setup_write_request(struct nfs_open_context* ctx,
		struct page *page, unsigned int offset, unsigned int bytes)
{
	struct inode *inode = page_file_mapping(page)->host;
	struct nfs_page	*req;

	req = nfs_try_to_update_request(inode, page, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_create_request(ctx, page, NULL, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(inode, req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_page	*req;

	req = nfs_setup_write_request(ctx, page, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(page, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct page *page)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = file_inode(file)->i_flctx;
	struct nfs_page	*req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_page_find_head_request(page);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = req->wb_page != page ||
			!nfs_match_open_context(req->wb_context, ctx);
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_page(page_file_mapping(page)->host, page);
	} while (status == 0);
	return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Returns 0 and sets a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_key_timeout_notify(auth, ctx->cred);
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;

	return rpcauth_cred_key_to_expire(auth, ctx->cred);
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
{
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
		return false;
	return PageUptodate(page) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			fl->fl_type == F_WRLCK;
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct page *page, struct inode *inode)
{
	int ret;
	struct file_lock_context *flctx = inode->i_flctx;
	struct file_lock *fl;

	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_write_pageuptodate(page, inode))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
					fl_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
					fl_list);
		if (fl->fl_type == F_WRLCK)
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_updatepage(struct file *file, struct page *page,
		unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct inode *inode = page_file_mapping(page)->host;
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS:       nfs_updatepage(%pD2 %d@%lld)\n",
		file, count, (long long)(page_file_offset(page) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, page, inode)) {
		count = max(count + offset, nfs_page_length(page));
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, page, offset, count);
	if (status < 0)
		nfs_set_pageerror(page);
	else
		__set_page_dirty_nobuffers(page);
out:
	dprintk("NFS:       nfs_updatepage returns %d (isize %lld)\n",
			status, (long long)i_size_read(inode));
	return status;
}
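
/*
 * Example of the write extension in nfs_updatepage (numbers are
 * hypothetical): for a 100-byte write at offset 200 into a fully valid
 * 4096-byte page, count becomes max(200 + 100, 4096) = 4096 and offset
 * drops to 0, so the whole page is written in one request instead of a
 * 100-byte fragment.
 */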

static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
	case FLUSH_HIGHPRI:
		return RPC_PRIORITY_HIGH;
	case FLUSH_LOWPRI:
		return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg);

	nfs4_state_protect_write(NFS_SERVER(hdr->inode)->nfs_client,
				 &task_setup_data->rpc_client, msg, hdr);
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	nfs_mark_request_dirty(req);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	nfs_end_page_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head)
{
	struct nfs_page	*req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.init_hdr = nfs_async_write_init,
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			   struct inode *inode, int ioflags, bool force_mds,
			   const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

/*
 * Special version of should_remove_suid() that ignores capabilities.
 */
static int nfs_should_remove_suid(const struct inode *inode)
{
	umode_t mode = inode->i_mode;
	int kill = 0;

	/* suid always must be killed */
	if (unlikely(mode & S_ISUID))
		kill = ATTR_KILL_SUID;

	/*
	 * sgid without any exec bits is just a mandatory locking mark; leave
	 * it alone. If some exec bits are set, it's a real sgid; kill it.
	 */
	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
		kill |= ATTR_KILL_SGID;

	if (unlikely(kill && S_ISREG(mode)))
		return kill;

	return 0;
}

static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
		struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;
	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);

	if (hdr->res.verf->committed < hdr->args.stable &&
	    task->tk_status >= 0) {
		/* We tried a write call, but the server did not
		 * commit data to stable storage even though we
		 * requested it.
		 * Note: There is a known bug in Tru64 < 5.0 in which
		 *	 the server reports NFS_DATA_SYNC, but performs
		 *	 NFS_FILE_SYNC. We therefore implement this checking
		 *	 as a dprintk() in order to avoid filling syslog.
		 */
		static unsigned long complain;

		/* Note this will print the MDS for a DS write */
		if (time_before(complain, jiffies)) {
			dprintk("NFS:       faulty NFS server %s:"
				" (committed = %d) != (stable = %d)\n",
				NFS_SERVER(inode)->nfs_client->cl_hostname,
				hdr->res.verf->committed, hdr->args.stable);
			complain = jiffies + 300 * HZ;
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode))
		nfs_mark_for_revalidate(inode);
	return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);
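
		/*
		 * Recovery below, with illustrative numbers: if the server
		 * wrote 4096 of a 16384-byte request, the arguments are
		 * advanced by 4096 bytes (count drops to 12288) and the RPC
		 * is restarted (a short unstable write is instead resent in
		 * full as NFS_FILE_SYNC); if the server wrote nothing at
		 * all, the write fails with -EIO rather than looping.
		 */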

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		rpc_restart_call_prepare(task);
	}
}

static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_on_atomic_t(&cinfo->rpcs_out,
			nfs_wait_atomic_killable, TASK_KILLABLE);
}

static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

static void nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out))
		wake_up_atomic_t(&cinfo->rpcs_out);
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};
	/* Set up the initial task struct.  */
	nfs_ops->commit_setup(data, &msg);

	dprintk("NFS: initiated commit call\n");

	nfs4_state_protect(NFS_SERVER(data->inode)->nfs_client,
		NFS_SP4_MACH_CRED_COMMIT, &task_setup_data.rpc_client, &msg);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

static loff_t nfs_get_lwb(struct list_head *head)
{
	loff_t lwb = 0;
	struct nfs_page *req;

	list_for_each_entry(req, head, wb_list)
		if (lwb < (req_offset(req) + req->wb_bytes))
			lwb = req_offset(req) + req->wb_bytes;

	return lwb;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first = nfs_list_entry(head->next);
	struct inode *inode = d_inode(first->wb_context->dentry);

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	list_splice_init(head, &data->pages);

	data->inode	  = inode;
	data->cred	  = first->wb_context->cred;
	data->lseg	  = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops     = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq	  = cinfo->dreq;

	data->args.fh     = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count  = 0;
	data->context     = get_nfs_open_context(first->wb_context);
	data->res.fattr   = &data->fattr;
	data->res.verf    = &data->verf;
	nfs_fattr_init(&data->fattr);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		if (!cinfo->dreq)
			nfs_clear_page_commit(req->wb_page);
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void
nfs_commit_resched_write(struct nfs_commit_info *cinfo,
			 struct nfs_page *req)
{
	__set_page_dirty_nobuffers(req->wb_page);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data;

	/* another commit raced with us */
	if (list_empty(head))
		return 0;

	data = nfs_commitdata_alloc(true);

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	atomic_inc(&cinfo->mds->rpcs_out);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how, 0);
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	dprintk("NFS: %5u nfs_commit_done (status %d)\n",
		task->tk_pid, task->tk_status);

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
}

static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	struct nfs_page *req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;
	struct nfs_server *nfss;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		if (req->wb_page)
			nfs_clear_page_commit(req->wb_page);

		dprintk("NFS:       commit (%s/%llu %d@%lld)",
			req->wb_context->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(d_inode(req->wb_context->dentry)),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			nfs_context_set_write_error(req->wb_context, status);
			if (req->wb_page)
				nfs_inode_remove_request(req);
			dprintk_cont(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (!nfs_write_verifier_cmp(&req->wb_verf, &data->verf.verifier)) {
			/* We have a match */
			if (req->wb_page)
				nfs_inode_remove_request(req);
			dprintk_cont(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk_cont(" mismatch\n");
		nfs_mark_request_dirty(req);
		set_bit(NFS_CONTEXT_RESEND_WRITES, &req->wb_context->flags);
	next:
		nfs_unlock_and_release_request(req);
	}
	nfss = NFS_SERVER(data->inode);
	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		clear_bdi_congested(inode_to_bdi(data->inode), BLK_RW_ASYNC);

	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	nfs_commit_end(cinfo.mds);
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.resched_write = nfs_commit_resched_write,
};

int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}
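
/*
 * nfs_commit_inode - commit any unstable writes for @inode
 * @how: FLUSH_SYNC to wait for the resulting COMMIT calls to finish
 *
 * Returns the number of requests scanned off the commit lists (which may
 * be zero), or a negative errno on failure; callers such as
 * nfs_write_inode() use the count to adjust writeback accounting.
 */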
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	/* no commits means nothing needs to be done */
	if (!atomic_long_read(&nfsi->commit_info.ncommit))
		return ret;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = nfs_commit_inode(inode, flags);
	if (ret >= 0) {
		if (wbc->sync_mode == WB_SYNC_NONE) {
			if (ret < wbc->nr_to_write)
				wbc->nr_to_write -= ret;
			else
				wbc->nr_to_write = 0;
		}
		return 0;
	}
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
				     loff_t lstart, loff_t lend)
{
	int ret;

	ret = filemap_write_and_wait_range(mapping, lstart, lend);
	if (ret == 0)
		ret = pnfs_sync_inode(mapping->host, true);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		goto out;
	pnfs_sync_inode(inode, true);
	ret = 0;

out:
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

int nfs_wb_page_cancel(struct inode *inode, struct page *page)
{
	struct nfs_page *req;
	int ret = 0;

	wait_on_page_writeback(page);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(page);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this page have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	return ret;
}

/*
 * Write back all requests on one page - we do this before reading it.
 */
int nfs_wb_page(struct inode *inode, struct page *page)
{
	loff_t range_start = page_file_offset(page);
	loff_t range_end = range_start + (loff_t)(PAGE_SIZE - 1);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_page_enter(inode);

	for (;;) {
		wait_on_page_writeback(page);
		if (clear_page_dirty_for_io(page)) {
			ret = nfs_writepage_locked(page, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!PagePrivate(page))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_page_exit(inode, ret);
	return ret;
}
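/*
 * Illustrative sketch (disabled): the flush-then-commit pattern that
 * nfs_wb_all() above applies to a whole file, restricted to a byte
 * range. example_wb_range() is hypothetical and called from nowhere;
 * note that the range only limits the WRITE phase, since COMMIT is
 * always requested for the entire inode (see nfs_init_commit()).
 */
#if 0
static int example_wb_range(struct inode *inode, loff_t start, loff_t end)
{
	/* Push dirty pages in [start, end] to the server (UNSTABLE). */
	int ret = filemap_write_and_wait_range(inode->i_mapping, start, end);

	if (ret)
		return ret;
	/* Ask the server to flush its write cache to stable storage. */
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	return ret < 0 ? ret : 0;
}
#endif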
#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
		     struct page *page, enum migrate_mode mode)
{
	/*
	 * If PagePrivate is set, then the page is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the page lock.
	 */
	if (PagePrivate(page))
		return -EBUSY;

	if (!nfs_fscache_release_page(page, GFP_KERNEL))
		return -EBUSY;

	return migrate_page(mapping, newpage, page, mode);
}
#endif

int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages)) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
	.rw_initiate		= nfs_initiate_write,
};
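/*
 * Worked example (disabled) of the nfs_congestion_kb scaling in
 * nfs_init_writepagecache() above, assuming 4K pages (PAGE_SHIFT ==
 * 12). example_congestion_kb() is a hypothetical helper, not used
 * anywhere in the driver.
 */
#if 0
static unsigned long example_congestion_kb(unsigned long ram_pages)
{
	/*
	 * For 1GB of RAM: ram_pages = 262144, int_sqrt() = 512,
	 * 16 * 512 = 8192, << (12 - 10) => 32768k, matching the
	 * table above.
	 */
	unsigned long kb = (16 * int_sqrt(ram_pages)) << (PAGE_SHIFT - 10);

	return min(kb, 256UL * 1024);	/* default is capped at 256MB */
}
#endif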