// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>
#include <linux/filelock.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

struct nfs_io_completion {
	void (*complete)(void *data);
	void *data;
	struct kref refcount;
};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct folio *folio);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

struct nfs_commit_data *nfs_commitdata_alloc(void)
{
	struct nfs_commit_data *p;

	p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask());
	if (!p) {
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		if (!p)
			return NULL;
		memset(p, 0, sizeof(*p));
	}
	INIT_LIST_HEAD(&p->pages);
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p;

	p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask());
	if (!p) {
		p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT);
		if (!p)
			return NULL;
		memset(p, 0, sizeof(*p));
	}
	p->rw_mode = FMODE_WRITE;
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{
	return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
}

static void nfs_io_completion_init(struct nfs_io_completion *ioc,
				   void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	kref_init(&ioc->refcount);
}
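/*
 * Illustrative sketch (not part of the original source): an
 * nfs_io_completion is a kref-counted "run once, when the last user is
 * done" callback. A minimal usage sketch, assuming a caller-supplied
 * 'complete' callback and opaque 'data', using the get/put helpers
 * defined just below:
 *
 *	struct nfs_io_completion *ioc = nfs_io_completion_alloc(GFP_KERNEL);
 *
 *	if (ioc)
 *		nfs_io_completion_init(ioc, complete, data);
 *	nfs_io_completion_get(ioc);	-- one reference per in-flight I/O
 *	...
 *	nfs_io_completion_put(ioc);	-- final put invokes ->complete(data)
 */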
static void nfs_io_completion_release(struct kref *kref)
{
	struct nfs_io_completion *ioc = container_of(kref,
			struct nfs_io_completion, refcount);
	ioc->complete(ioc->data);
	kfree(ioc);
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_get(&ioc->refcount);
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_put(&ioc->refcount, nfs_io_completion_release);
}

static void
nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
{
	if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
		kref_get(&req->wb_kref);
		atomic_long_inc(&NFS_I(inode)->nrequests);
	}
}

static int
nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
{
	int ret;

	if (!test_bit(PG_REMOVE, &req->wb_flags))
		return 0;
	ret = nfs_page_group_lock(req);
	if (ret)
		return ret;
	if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
		nfs_page_set_inode_ref(req, inode);
	nfs_page_group_unlock(req);
	return 0;
}

static struct nfs_page *nfs_folio_private_request(struct folio *folio)
{
	return folio_get_private(folio);
}

/**
 * nfs_folio_find_private_request - find head request associated with a folio
 * @folio: pointer to folio
 *
 * must be called while holding the inode lock.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_folio_find_private_request(struct folio *folio)
{
	struct address_space *mapping = folio_file_mapping(folio);
	struct nfs_page *req;

	if (!folio_test_private(folio))
		return NULL;
	spin_lock(&mapping->private_lock);
	req = nfs_folio_private_request(folio);
	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}
	spin_unlock(&mapping->private_lock);
	return req;
}

static struct nfs_page *nfs_folio_find_swap_request(struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs_page *req = NULL;

	if (!folio_test_swapcache(folio))
		return NULL;
	mutex_lock(&nfsi->commit_mutex);
	if (folio_test_swapcache(folio)) {
		req = nfs_page_search_commits_for_head_request_locked(nfsi,
								      folio);
		if (req) {
			WARN_ON_ONCE(req->wb_head != req);
			kref_get(&req->wb_kref);
		}
	}
	mutex_unlock(&nfsi->commit_mutex);
	return req;
}
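/*
 * Usage sketch (illustrative only): the lookup helpers here, and
 * nfs_folio_find_head_request() just below, return the head request with
 * an extra reference held, so a typical caller drops it when done, along
 * the lines of nfs_flush_incompatible() further down:
 *
 *	struct nfs_page *req = nfs_folio_find_head_request(folio);
 *
 *	if (req != NULL) {
 *		-- inspect req->wb_offset, req->wb_bytes, wb_lock_context...
 *		nfs_release_request(req);
 *	}
 */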
/**
 * nfs_folio_find_head_request - find head request associated with a folio
 * @folio: pointer to folio
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_folio_find_head_request(struct folio *folio)
{
	struct nfs_page *req;

	req = nfs_folio_find_private_request(folio);
	if (!req)
		req = nfs_folio_find_swap_request(folio);
	return req;
}

static struct nfs_page *nfs_folio_find_and_lock_request(struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	struct nfs_page *req, *head;
	int ret;

	for (;;) {
		req = nfs_folio_find_head_request(folio);
		if (!req)
			return req;
		head = nfs_page_group_lock_head(req);
		if (head != req)
			nfs_release_request(req);
		if (IS_ERR(head))
			return head;
		ret = nfs_cancel_remove_inode(head, inode);
		if (ret < 0) {
			nfs_unlock_and_release_request(head);
			return ERR_PTR(ret);
		}
		/* Ensure that nobody removed the request before we locked it */
		if (head == nfs_folio_private_request(folio))
			break;
		if (folio_test_swapcache(folio))
			break;
		nfs_unlock_and_release_request(head);
	}
	return head;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct folio *folio, unsigned int offset,
			  unsigned int count)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio);
	if (i_size > 0 && folio_index(folio) < end_index)
		goto out;
	end = folio_file_pos(folio) + (loff_t)offset + (loff_t)count;
	if (i_size >= end)
		goto out;
	trace_nfs_size_grow(inode, end);
	i_size_write(inode, end);
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	spin_unlock(&inode->i_lock);
	nfs_fscache_invalidate(inode, 0);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
	struct inode *inode = mapping->host;

	nfs_zap_mapping(mapping->host, mapping);
	/* Force file size revalidation */
	spin_lock(&inode->i_lock);
	nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED |
					     NFS_INO_INVALID_CHANGE |
					     NFS_INO_INVALID_SIZE);
	spin_unlock(&inode->i_lock);
}

static void nfs_mapping_set_error(struct folio *folio, int error)
{
	struct address_space *mapping = folio_file_mapping(folio);

	folio_set_error(folio);
	filemap_set_wb_err(mapping, error);
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err,
			   error == -ENOSPC ? -ENOSPC : -EIO);
	nfs_set_pageerror(mapping);
}
/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	unsigned int len = nfs_folio_length(nfs_page_to_folio(req));
	struct nfs_page *tmp;
	unsigned int pos = 0;

	nfs_page_group_lock(req);

	for (;;) {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (!tmp)
			break;
		pos = tmp->wb_pgbase + tmp->wb_bytes;
	}

	nfs_page_group_unlock(req);
	return pos >= len;
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (folio_test_uptodate(folio))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	folio_mark_uptodate(folio);
}

static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

static void nfs_folio_set_writeback(struct folio *folio)
{
	struct nfs_server *nfss = NFS_SERVER(folio_file_mapping(folio)->host);

	folio_start_writeback(folio);
	if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH)
		nfss->write_congested = 1;
}

static void nfs_folio_end_writeback(struct folio *folio)
{
	struct nfs_server *nfss = NFS_SERVER(folio_file_mapping(folio)->host);

	folio_end_writeback(folio);
	if (atomic_long_dec_return(&nfss->writeback) <
	    NFS_CONGESTION_OFF_THRESH)
		nfss->write_congested = 0;
}

static void nfs_page_end_writeback(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_WB_END)) {
		nfs_unlock_request(req);
		nfs_folio_end_writeback(nfs_page_to_folio(req));
	} else
		nfs_unlock_request(req);
}
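/*
 * Worked example (illustrative): with nfs_congestion_kb = 65536 (64 MiB)
 * and 4 KiB pages, PAGE_SHIFT - 10 == 2, so
 *
 *	NFS_CONGESTION_ON_THRESH  = 65536 >> 2           = 16384 pages
 *	NFS_CONGESTION_OFF_THRESH = 16384 - (16384 >> 2) = 12288 pages
 *
 * i.e. the server is marked write-congested once ~64 MiB of writeback is
 * in flight, and uncongested again once it drains below ~48 MiB.
 */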
/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head,
				 struct inode *inode)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		/* Note: lock subreq in order to change subreq->wb_head */
		nfs_page_set_headlock(subreq);
		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_this_page = subreq;
		subreq->wb_head = subreq;

		clear_bit(PG_REMOVE, &subreq->wb_flags);

		/* Note: races with nfs_page_group_destroy() */
		if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
				nfs_page_clear_headlock(subreq);
				nfs_free_request(subreq);
			} else
				nfs_page_clear_headlock(subreq);
			continue;
		}
		nfs_page_clear_headlock(subreq);

		nfs_release_request(old_head);

		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
			nfs_release_request(subreq);
			atomic_long_dec(&NFS_I(inode)->nrequests);
		}

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_and_release_request(subreq);
	}
}

/*
 * nfs_join_page_group - destroy subrequests of the head req
 * @head: the page used to lookup the "page group" of nfs_page structures
 * @inode: Inode to which the request belongs.
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 */
void
nfs_join_page_group(struct nfs_page *head, struct inode *inode)
{
	struct nfs_page *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int pgbase, off, bytes;

	pgbase = head->wb_pgbase;
	bytes = head->wb_bytes;
	off = head->wb_offset;
	for (subreq = head->wb_this_page; subreq != head;
	     subreq = subreq->wb_this_page) {
		/* Subrequests should always form a contiguous range */
		if (pgbase > subreq->wb_pgbase) {
			off -= pgbase - subreq->wb_pgbase;
			bytes += pgbase - subreq->wb_pgbase;
			pgbase = subreq->wb_pgbase;
		}
		bytes = max(subreq->wb_pgbase + subreq->wb_bytes
				- pgbase, bytes);
	}

	/* Set the head request's range to cover the former page group */
	head->wb_pgbase = pgbase;
	head->wb_bytes = bytes;
	head->wb_offset = off;

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;
	}

	nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
}
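/*
 * Worked example (illustrative) for the merge loop above: a head covering
 * pgbase 512, bytes 512, joined with a subrequest covering pgbase 0,
 * bytes 512, first rewinds the head (off -= 512, bytes += 512, pgbase = 0)
 * and then takes bytes = max(0 + 512 - 0, 1024) = 1024, so the joined
 * head spans the whole former group, [0, 1024) within the page.
 */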
/*
 * nfs_lock_and_join_requests - join all subreqs to the head req
 * @folio: the folio used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @folio, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	struct nfs_page *head;
	int ret;

	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
	head = nfs_folio_find_and_lock_request(folio);
	if (IS_ERR_OR_NULL(head))
		return head;

	/* lock each request in the page group */
	ret = nfs_page_group_lock_subrequests(head);
	if (ret < 0) {
		nfs_unlock_and_release_request(head);
		return ERR_PTR(ret);
	}

	nfs_join_page_group(head, inode);

	return head;
}

static void nfs_write_error(struct nfs_page *req, int error)
{
	trace_nfs_write_error(nfs_page_to_inode(req), req, error);
	nfs_mapping_set_error(nfs_page_to_folio(req), error);
	nfs_inode_remove_request(req);
	nfs_page_end_writeback(req);
	nfs_release_request(req);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_page_async_flush(struct folio *folio,
				struct writeback_control *wbc,
				struct nfs_pageio_descriptor *pgio)
{
	struct nfs_page *req;
	int ret = 0;

	req = nfs_lock_and_join_requests(folio);
	if (!req)
		goto out;
	ret = PTR_ERR(req);
	if (IS_ERR(req))
		goto out;

	nfs_folio_set_writeback(folio);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	/* If there is a fatal error that covers this write, just exit */
	ret = pgio->pg_error;
	if (nfs_error_is_fatal_on_server(ret))
		goto out_launder;

	ret = 0;
	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
		if (nfs_error_is_fatal_on_server(ret))
			goto out_launder;
		if (wbc->sync_mode == WB_SYNC_NONE)
			ret = AOP_WRITEPAGE_ACTIVATE;
		folio_redirty_for_writepage(wbc, folio);
		nfs_redirty_request(req);
		pgio->pg_error = 0;
	} else
		nfs_add_stats(folio_file_mapping(folio)->host,
			      NFSIOS_WRITEPAGES, 1);
out:
	return ret;
out_launder:
	nfs_write_error(req, ret);
	return 0;
}

static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio)
{
	nfs_pageio_cond_complete(pgio, folio_index(folio));
	return nfs_page_async_flush(folio, wbc, pgio);
}
/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct folio *folio,
				struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = folio_file_mapping(folio)->host;
	int err;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    NFS_SERVER(inode)->write_congested)
		return AOP_WRITEPAGE_ACTIVATE;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0, false,
			      &nfs_async_write_completion_ops);
	err = nfs_do_writepage(folio, wbc, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	return err;
}

int nfs_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	ret = nfs_writepage_locked(folio, wbc);
	if (ret != AOP_WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}

static int nfs_writepages_callback(struct page *page,
				   struct writeback_control *wbc, void *data)
{
	struct folio *folio = page_folio(page);
	int ret;

	ret = nfs_do_writepage(folio, wbc, data);
	if (ret != AOP_WRITEPAGE_ACTIVATE)
		unlock_page(page);
	return ret;
}

static void nfs_io_completion_commit(void *inode)
{
	nfs_commit_inode(inode, 0);
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	struct nfs_io_completion *ioc = NULL;
	unsigned int mntflags = NFS_SERVER(inode)->flags;
	int priority = 0;
	int err;

	if (wbc->sync_mode == WB_SYNC_NONE &&
	    NFS_SERVER(inode)->write_congested)
		return 0;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate ||
	    wbc->for_background || wbc->for_sync || wbc->for_reclaim) {
		ioc = nfs_io_completion_alloc(GFP_KERNEL);
		if (ioc)
			nfs_io_completion_init(ioc, nfs_io_completion_commit,
					       inode);
		priority = wb_priority(wbc);
	}

	do {
		nfs_pageio_init_write(&pgio, inode, priority, false,
				      &nfs_async_write_completion_ops);
		pgio.pg_io_completion = ioc;
		err = write_cache_pages(mapping, wbc, nfs_writepages_callback,
					&pgio);
		pgio.pg_error = 0;
		nfs_pageio_complete(&pgio);
	} while (err < 0 && !nfs_error_is_fatal(err));
	nfs_io_completion_put(ioc);

	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}
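/*
 * Illustrative summary (not from the original source): when the mount is
 * not "write eager" (and even then for kupdate, background, sync or
 * reclaim writeback), nfs_writepages() above attaches an nfs_io_completion
 * whose callback is nfs_io_completion_commit(). Each pgio header issued
 * for this flush takes a reference (nfs_async_write_init below), so
 * nfs_commit_inode(inode, 0) runs exactly once, after the last WRITE of
 * the batch completes and the final reference is put.
 */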
/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	struct address_space *mapping = folio_file_mapping(folio);
	struct nfs_inode *nfsi = NFS_I(mapping->host);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);

	/*
	 * Swap-space should not get truncated. Hence no need to plug the race
	 * with invalidate/truncate.
	 */
	spin_lock(&mapping->private_lock);
	if (likely(!folio_test_swapcache(folio))) {
		set_bit(PG_MAPPED, &req->wb_flags);
		folio_set_private(folio);
		folio->private = req;
	}
	spin_unlock(&mapping->private_lock);
	atomic_long_inc(&nfsi->nrequests);
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		struct folio *folio = nfs_page_to_folio(req->wb_head);
		struct address_space *mapping = folio_file_mapping(folio);

		spin_lock(&mapping->private_lock);
		if (likely(folio && !folio_test_swapcache(folio))) {
			folio->private = NULL;
			folio_clear_private(folio);
			clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
		}
		spin_unlock(&mapping->private_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
		nfs_release_request(req);
		atomic_long_dec(&NFS_I(nfs_page_to_inode(req))->nrequests);
	}
}

static void nfs_mark_request_dirty(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	if (folio)
		filemap_dirty_folio(folio_mapping(folio), folio);
}

/*
 * nfs_page_search_commits_for_head_request_locked
 *
 * Search through commit lists on @inode for the head request for @folio.
 * Must be called while holding the inode (which is cinfo) lock.
 *
 * Returns the head request if found, or NULL if not found.
 */
static struct nfs_page *
nfs_page_search_commits_for_head_request_locked(struct nfs_inode *nfsi,
						struct folio *folio)
{
	struct nfs_page *freq, *t;
	struct nfs_commit_info cinfo;
	struct inode *inode = &nfsi->vfs_inode;

	nfs_init_cinfo_from_inode(&cinfo, inode);

	/* search through pnfs commit lists */
	freq = pnfs_search_commit_reqs(inode, &cinfo, folio);
	if (freq)
		return freq->wb_head;

	/* Linearly search the commit list for the correct request */
	list_for_each_entry_safe(freq, t, &cinfo.mds->list, wb_list) {
		if (nfs_page_to_folio(freq) == folio)
			return freq->wb_head;
	}

	return NULL;
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);
/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	atomic_long_dec(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);
/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}

static void nfs_folio_clear_commit(struct folio *folio)
{
	if (folio) {
		long nr = folio_nr_pages(folio);

		node_stat_mod_folio(folio, NR_WRITEBACK, -nr);
		wb_stat_mod(&inode_to_bdi(folio_file_mapping(folio)->host)->wb,
			    WB_WRITEBACK, -nr);
	}
}

/* Called holding the request lock on @req */
static void
nfs_clear_request_commit(struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct nfs_open_context *ctx = nfs_req_openctx(req);
		struct inode *inode = d_inode(ctx->dentry);
		struct nfs_commit_info cinfo;

		nfs_init_cinfo_from_inode(&cinfo, inode);
		mutex_lock(&NFS_I(inode)->commit_mutex);
		if (!pnfs_clear_request_commit(req, &cinfo)) {
			nfs_request_remove_commit_list(req, &cinfo);
		}
		mutex_unlock(&NFS_I(inode)->commit_mutex);
		nfs_folio_clear_commit(nfs_page_to_folio(req));
	}
}

int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{
	nfs_io_completion_get(hdr->io_completion);
}
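/*
 * Illustrative summary of nfs_write_need_commit() above, assuming the
 * usual ordering NFS_UNSTABLE < NFS_DATA_SYNC < NFS_FILE_SYNC:
 *
 *	server committed	COMMIT needed?
 *	NFS_FILE_SYNC		no
 *	NFS_DATA_SYNC		only for a non-pNFS write (hdr->lseg == NULL)
 *	NFS_UNSTABLE		yes
 */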
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			trace_nfs_comp_error(hdr->inode, req, hdr->error);
			nfs_mapping_set_error(nfs_page_to_folio(req),
					      hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			/* Reset wb_nio, since the write was successful. */
			req->wb_nio = 0;
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_page_end_writeback(req);
		nfs_release_request(req);
	}
out:
	nfs_io_completion_put(hdr->io_completion);
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return atomic_long_read(&cinfo->mds->ncommit);
}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		kref_get(&req->wb_kref);
		if (!nfs_lock_request(req)) {
			nfs_release_request(req);
			continue;
		}
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	if (!atomic_long_read(&cinfo->mds->ncommit))
		return 0;
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	return ret;
}
/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
						  unsigned int offset,
						  unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	end = offset + bytes;

	req = nfs_lock_and_join_requests(folio);
	if (IS_ERR_OR_NULL(req))
		return req;

	rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
	if (offset > rqend || end < req->wb_offset)
		goto out_flushme;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
	req->wb_nio = 0;
	return req;
out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	error = nfs_wb_folio(folio_file_mapping(folio)->host, folio);
	return (error < 0) ? ERR_PTR(error) : NULL;
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
						struct folio *folio,
						unsigned int offset,
						unsigned int bytes)
{
	struct nfs_page *req;

	req = nfs_try_to_update_request(folio, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_page_create_from_folio(ctx, folio, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx,
			       struct folio *folio, unsigned int offset,
			       unsigned int count)
{
	struct nfs_page *req;

	req = nfs_setup_write_request(ctx, folio, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(folio, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct folio *folio)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = locks_inode_context(file_inode(file));
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_folio_find_head_request(folio);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = nfs_page_to_folio(req) != folio ||
			   !nfs_match_open_context(nfs_req_openctx(req), ctx);
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_folio(folio_file_mapping(folio)->host, folio);
	} while (status == 0);
	return status;
}
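/*
 * Worked example (illustrative) for nfs_try_to_update_request() above:
 * with an existing request covering [0, 512) in the page, a new write of
 * [512, 1024) passes the contiguity test (offset 512 is not > rqend 512,
 * and end 1024 is not < wb_offset 0), so the request is widened to
 * wb_bytes = 1024 - 0 = 1024. A write at [2048, 2560) instead takes
 * out_flushme and the old request is written back via nfs_wb_folio().
 */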
/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);

	if (nfs_ctx_key_to_expire(ctx, inode) &&
	    !rcu_access_pointer(ctx->ll_cred))
		/* Already expired! */
		return -EACCES;
	return 0;
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
	struct rpc_cred *cred, *new, *old = NULL;
	struct auth_cred acred = {
		.cred = ctx->cred,
	};
	bool ret = false;

	rcu_read_lock();
	cred = rcu_dereference(ctx->ll_cred);
	if (cred && !(cred->cr_ops->crkey_timeout &&
		      cred->cr_ops->crkey_timeout(cred)))
		goto out;
	rcu_read_unlock();

	new = auth->au_ops->lookup_cred(auth, &acred, 0);
	if (new == cred) {
		put_rpccred(new);
		return true;
	}
	if (IS_ERR_OR_NULL(new)) {
		new = NULL;
		ret = true;
	} else if (new->cr_ops->crkey_timeout &&
		   new->cr_ops->crkey_timeout(new))
		ret = true;

	rcu_read_lock();
	old = rcu_dereference_protected(xchg(&ctx->ll_cred,
					     RCU_INITIALIZER(new)), 1);
out:
	rcu_read_unlock();
	put_rpccred(old);
	return ret;
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_folio_write_uptodate(struct folio *folio, unsigned int pagelen)
{
	struct inode *inode = folio_file_mapping(folio)->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity &
	    (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE))
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0)
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0)
		return false;
	return folio_test_uptodate(folio) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
	       fl->fl_type == F_WRLCK;
}
/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct folio *folio,
				unsigned int pagelen)
{
	struct inode *inode = file_inode(file);
	struct file_lock_context *flctx = locks_inode_context(inode);
	struct file_lock *fl;
	int ret;

	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_folio_write_uptodate(folio, pagelen))
		return 0;
	if (NFS_PROTO(inode)->have_delegation(inode, FMODE_WRITE))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
				      fl_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
				      fl_list);
		if (fl->fl_type == F_WRLCK)
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_update_folio(struct file *file, struct folio *folio,
		     unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct address_space *mapping = folio_file_mapping(folio);
	struct inode *inode = mapping->host;
	unsigned int pagelen = nfs_folio_length(folio);
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_update_folio(%pD2 %d@%lld)\n", file, count,
		(long long)(folio_file_pos(folio) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, folio, pagelen)) {
		count = max(count + offset, pagelen);
		offset = 0;
	}

	status = nfs_writepage_setup(ctx, folio, offset, count);
	if (status < 0)
		nfs_set_pageerror(mapping);
out:
	dprintk("NFS: nfs_update_folio returns %d (isize %lld)\n",
		status, (long long)i_size_read(inode));
	return status;
}
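/*
 * Worked example (illustrative): a 50-byte write at offset 100 into an
 * up-to-date 4096-byte folio, with no conflicting byte-range locks, is
 * widened by the branch above to
 *
 *	count  = max(count + offset, pagelen) = max(150, 4096) = 4096
 *	offset = 0
 *
 * so a single request covers the whole folio instead of a fragment.
 */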
static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
	case FLUSH_HIGHPRI:
		return RPC_PRIORITY_HIGH;
	case FLUSH_LOWPRI:
		return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	if (IS_SWAPFILE(hdr->inode))
		task_setup_data->flags |= RPC_TASK_SWAPPER;
	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
	trace_nfs_initiate_write(hdr);
}

/* If an nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));

	/* Bump the transmission count */
	req->wb_nio++;
	nfs_mark_request_dirty(req);
	atomic_long_inc(&nfsi->redirtied_pages);
	nfs_page_end_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		if (nfs_error_is_fatal_on_server(error))
			nfs_write_error(req, error);
		else
			nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages, 0);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.init_hdr = nfs_async_write_init,
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			   struct inode *inode, int ioflags, bool force_mds,
			   const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
				       struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);
/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients. A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
	trace_nfs_writeback_done(task, hdr);

	if (task->tk_status >= 0) {
		enum nfs3_stable_how committed = hdr->res.verf->committed;

		if (committed == NFS_UNSTABLE) {
			/*
			 * We have some uncommitted data on the server at
			 * this point, so ensure that we keep track of that
			 * fact irrespective of what later writes do.
			 */
			set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags);
		}

		if (committed < hdr->args.stable) {
			/* We tried a write call, but the server did not
			 * commit data to stable storage even though we
			 * requested it.
			 * Note: There is a known bug in Tru64 < 5.0 in which
			 * the server reports NFS_DATA_SYNC, but performs
			 * NFS_FILE_SYNC. We therefore implement this checking
			 * as a dprintk() in order to avoid filling syslog.
			 */
			static unsigned long complain;

			/* Note this will print the MDS for a DS write */
			if (time_before(complain, jiffies)) {
				dprintk("NFS: faulty NFS server %s:"
					" (committed = %d) != (stable = %d)\n",
					NFS_SERVER(inode)->nfs_client->cl_hostname,
					committed, hdr->args.stable);
				complain = jiffies + 300 * HZ;
			}
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode)) {
		spin_lock(&inode->i_lock);
		nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
		spin_unlock(&inode->i_lock);
	}
	return 0;
}
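/*
 * Illustrative example: with the on-the-wire ordering NFS_UNSTABLE (0) <
 * NFS_DATA_SYNC (1) < NFS_FILE_SYNC (2), a server that answers an
 * NFS_FILE_SYNC request with NFS_DATA_SYNC satisfies
 * "committed < hdr->args.stable" above and triggers the rate-limited
 * faulty-server dprintk(), while an NFS_UNSTABLE reply additionally sets
 * NFS_IOHDR_UNSTABLE_WRITES so that a COMMIT will follow.
 */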
/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		resp->count = 0;
		resp->verf->committed = 0;
		rpc_restart_call_prepare(task);
	}
}

static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_var_event_killable(&cinfo->rpcs_out,
				       !atomic_read(&cinfo->rpcs_out));
}

static void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

bool nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out)) {
		wake_up_var(&cinfo->rpcs_out);
		return true;
	}
	return false;
}
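/*
 * Illustrative sketch of the rpcs_out protocol above, mirroring what
 * __nfs_commit_inode() does further down:
 *
 *	nfs_commit_begin(cinfo.mds);		-- keep the count > 0
 *	... issue COMMIT calls; each takes its own begin/end pair ...
 *	nfs_commit_end(cinfo.mds);		-- drop our hold
 *	if (may_wait)
 *		wait_on_commit(cinfo.mds);	-- killable sleep until
 *						   rpcs_out reaches zero
 */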
void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};

	if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	/* Set up the initial task struct. */
	nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
	trace_nfs_initiate_commit(data);

	dprintk("NFS: initiated commit call\n");

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

static loff_t nfs_get_lwb(struct list_head *head)
{
	loff_t lwb = 0;
	struct nfs_page *req;

	list_for_each_entry(req, head, wb_list)
		if (lwb < (req_offset(req) + req->wb_bytes))
			lwb = req_offset(req) + req->wb_bytes;

	return lwb;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first;
	struct nfs_open_context *ctx;
	struct inode *inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	if (head)
		list_splice_init(head, &data->pages);

	first = nfs_list_entry(data->pages.next);
	ctx = nfs_req_openctx(first);
	inode = d_inode(ctx->dentry);

	data->inode = inode;
	data->cred = ctx->cred;
	data->lseg = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq = cinfo->dreq;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->context = get_nfs_open_context(ctx);
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);
	nfs_commit_begin(cinfo->mds);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		nfs_folio_clear_commit(nfs_page_to_folio(req));
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

static void nfs_commit_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);

	filemap_dirty_folio(folio_mapping(folio), folio);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data;
	unsigned short task_flags = 0;

	/* another commit raced with us */
	if (list_empty(head))
		return 0;

	data = nfs_commitdata_alloc();
	if (!data) {
		nfs_retry_commit(head, NULL, cinfo, -1);
		return -ENOMEM;
	}

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	if (NFS_SERVER(inode)->nfs_client->cl_minorversion)
		task_flags = RPC_TASK_MOVEABLE;
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how,
				   RPC_TASK_CRED_NOREF | task_flags);
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
	trace_nfs_commit_done(task, data);
}
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_page *req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;
	struct nfs_server *nfss;
	struct folio *folio;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		folio = nfs_page_to_folio(req);
		nfs_folio_clear_commit(folio);

		dprintk("NFS: commit (%s/%llu %d@%lld)",
			nfs_req_openctx(req)->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			if (folio) {
				trace_nfs_commit_error(data->inode, req,
						       status);
				nfs_mapping_set_error(folio, status);
				nfs_inode_remove_request(req);
			}
			dprintk_cont(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (nfs_write_match_verf(verf, req)) {
			/* We have a match */
			if (folio)
				nfs_inode_remove_request(req);
			dprintk_cont(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk_cont(" mismatch\n");
		nfs_mark_request_dirty(req);
		atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
next:
		nfs_unlock_and_release_request(req);
		/* Latency breaker */
		cond_resched();
	}
	nfss = NFS_SERVER(data->inode);
	if (atomic_long_read(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
		nfss->write_congested = 0;

	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	nfs_commit_end(cinfo.mds);
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.resched_write = nfs_commit_resched_write,
};

int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}

static int __nfs_commit_inode(struct inode *inode, int how,
			      struct writeback_control *wbc)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int ret, nscan;

	how &= ~FLUSH_SYNC;
	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_commit_begin(cinfo.mds);
	for (;;) {
		ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
		if (ret <= 0)
			break;
		ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
		if (ret < 0)
			break;
		ret = 0;
		if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
			if (nscan < wbc->nr_to_write)
				wbc->nr_to_write -= nscan;
			else
				wbc->nr_to_write = 0;
		}
		if (nscan < INT_MAX)
			break;
		cond_resched();
	}
	nfs_commit_end(cinfo.mds);
	if (ret || !may_wait)
		return ret;
	return wait_on_commit(cinfo.mds);
}

int nfs_commit_inode(struct inode *inode, int how)
{
	return __nfs_commit_inode(inode, how, NULL);
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* no commits means nothing needs to be done */
		if (!atomic_long_read(&nfsi->commit_info.ncommit))
			goto check_requests_outstanding;

		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = __nfs_commit_inode(inode, flags, wbc);
	if (!ret) {
		if (flags & FLUSH_SYNC)
			return 0;
	} else if (atomic_long_read(&nfsi->commit_info.ncommit))
		goto out_mark_dirty;

check_requests_outstanding:
	if (!atomic_read(&nfsi->commit_info.rpcs_out))
		return ret;
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
				     loff_t lstart, loff_t lend)
{
	int ret;

	ret = filemap_write_and_wait_range(mapping, lstart, lend);
	if (ret == 0)
		ret = pnfs_sync_inode(mapping->host, true);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);

/*
 * flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		goto out;
	pnfs_sync_inode(inode, true);
	ret = 0;

out:
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
{
	struct nfs_page *req;
	int ret = 0;

	folio_wait_writeback(folio);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(folio);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this folio have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	return ret;
}
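/*
 * Illustrative sketch (a simplified assumption about the caller, based on
 * how the client uses the helper above): nfs_wb_folio_cancel() is the kind
 * of helper an ->invalidate_folio() implementation calls when an entire
 * folio is being thrown away, e.g. on truncate; partial invalidations must
 * leave the requests alone:
 *
 *	static void example_invalidate_folio(struct folio *folio,
 *					     size_t offset, size_t length)
 *	{
 *		if (offset != 0 || length < folio_size(folio))
 *			return;
 *		nfs_wb_folio_cancel(folio->mapping->host, folio);
 *	}
 */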
/**
 * nfs_wb_folio - Write back all requests on one folio
 * @inode: pointer to inode
 * @folio: pointer to folio
 *
 * Assumes that the folio has been locked by the caller, and will
 * not unlock it.
 */
int nfs_wb_folio(struct inode *inode, struct folio *folio)
{
	loff_t range_start = folio_file_pos(folio);
	loff_t range_end = range_start + (loff_t)folio_size(folio) - 1;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_end,
	};
	int ret;

	trace_nfs_writeback_folio(inode, folio);

	for (;;) {
		folio_wait_writeback(folio);
		if (folio_clear_dirty_for_io(folio)) {
			ret = nfs_writepage_locked(folio, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!folio_test_private(folio))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_folio_done(inode, folio, ret);
	return ret;
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
		struct folio *src, enum migrate_mode mode)
{
	/*
	 * If the private flag is set, the folio is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the folio lock.
	 */
	if (folio_test_private(src))
		return -EBUSY;

	if (folio_test_fscache(src)) {
		if (mode == MIGRATE_ASYNC)
			return -EBUSY;
		folio_wait_fscache(src);
	}

	return migrate_folio(mapping, dst, src, mode);
}
#endif
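/*
 * Worked example for the congestion scaling in nfs_init_writepagecache()
 * below, assuming 4KiB pages (so PAGE_SHIFT - 10 == 2): a 1GiB machine has
 * totalram_pages() == 262144, int_sqrt(262144) == 512, and
 * 16 * 512 << 2 == 32768k, matching the table in the comment there.
 */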
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
	nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_alloc_header	= nfs_writehdr_alloc,
	.rw_free_header		= nfs_writehdr_free,
	.rw_done		= nfs_writeback_done,
	.rw_result		= nfs_writeback_result,
	.rw_initiate		= nfs_initiate_write,
};
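/*
 * Illustrative sketch (a simplification with an assumed call order, not a
 * definitive trace): the nfs_rw_ops table above is how the generic
 * nfs_pageio layer drives the write side without referencing it directly.
 * Roughly:
 *
 *	struct nfs_pgio_header *hdr = ops->rw_alloc_header();
 *
 *	... attach requests to hdr, then ...
 *
 *	ops->rw_initiate(...);		start the WRITE RPC
 *	ops->rw_done(task, hdr, inode);	RPC-level completion
 *	ops->rw_result(task, hdr);	interpret or retry the result
 *	ops->rw_free_header(hdr);
 *
 * See the struct nfs_rw_ops definition for the exact signatures; the
 * sequence here is only meant to show the division of labour between the
 * common code and this file.
 */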