1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * linux/fs/nfs/write.c 4 * 5 * Write file data over NFS. 6 * 7 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de> 8 */ 9 10 #include <linux/types.h> 11 #include <linux/slab.h> 12 #include <linux/mm.h> 13 #include <linux/pagemap.h> 14 #include <linux/file.h> 15 #include <linux/writeback.h> 16 #include <linux/swap.h> 17 #include <linux/migrate.h> 18 19 #include <linux/sunrpc/clnt.h> 20 #include <linux/nfs_fs.h> 21 #include <linux/nfs_mount.h> 22 #include <linux/nfs_page.h> 23 #include <linux/backing-dev.h> 24 #include <linux/export.h> 25 #include <linux/freezer.h> 26 #include <linux/wait.h> 27 #include <linux/iversion.h> 28 #include <linux/filelock.h> 29 30 #include <linux/uaccess.h> 31 #include <linux/sched/mm.h> 32 33 #include "delegation.h" 34 #include "internal.h" 35 #include "iostat.h" 36 #include "nfs4_fs.h" 37 #include "fscache.h" 38 #include "pnfs.h" 39 40 #include "nfstrace.h" 41 42 #define NFSDBG_FACILITY NFSDBG_PAGECACHE 43 44 #define MIN_POOL_WRITE (32) 45 #define MIN_POOL_COMMIT (4) 46 47 struct nfs_io_completion { 48 void (*complete)(void *data); 49 void *data; 50 struct kref refcount; 51 }; 52 53 /* 54 * Local function declarations 55 */ 56 static void nfs_redirty_request(struct nfs_page *req); 57 static const struct rpc_call_ops nfs_commit_ops; 58 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 59 static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 60 static const struct nfs_rw_ops nfs_rw_write_ops; 61 static void nfs_inode_remove_request(struct nfs_page *req); 62 static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, 63 struct nfs_page *req); 64 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 65 struct inode *inode); 66 67 static struct kmem_cache *nfs_wdata_cachep; 68 static mempool_t *nfs_wdata_mempool; 69 static struct kmem_cache *nfs_cdata_cachep; 70 static mempool_t *nfs_commit_mempool; 71 72 struct nfs_commit_data *nfs_commitdata_alloc(void) 73 { 74 struct nfs_commit_data *p; 75 76 p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask()); 77 if (!p) { 78 p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT); 79 if (!p) 80 return NULL; 81 memset(p, 0, sizeof(*p)); 82 } 83 INIT_LIST_HEAD(&p->pages); 84 return p; 85 } 86 EXPORT_SYMBOL_GPL(nfs_commitdata_alloc); 87 88 void nfs_commit_free(struct nfs_commit_data *p) 89 { 90 mempool_free(p, nfs_commit_mempool); 91 } 92 EXPORT_SYMBOL_GPL(nfs_commit_free); 93 94 static struct nfs_pgio_header *nfs_writehdr_alloc(void) 95 { 96 struct nfs_pgio_header *p; 97 98 p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask()); 99 if (!p) { 100 p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT); 101 if (!p) 102 return NULL; 103 memset(p, 0, sizeof(*p)); 104 } 105 p->rw_mode = FMODE_WRITE; 106 return p; 107 } 108 109 static void nfs_writehdr_free(struct nfs_pgio_header *hdr) 110 { 111 mempool_free(hdr, nfs_wdata_mempool); 112 } 113 114 static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags) 115 { 116 return kmalloc_obj(struct nfs_io_completion, gfp_flags); 117 } 118 119 static void nfs_io_completion_init(struct nfs_io_completion *ioc, 120 void (*complete)(void *), void *data) 121 { 122 ioc->complete = complete; 123 ioc->data = data; 124 kref_init(&ioc->refcount); 125 } 126 127 static void nfs_io_completion_release(struct kref *kref) 128 { 129 struct nfs_io_completion *ioc = container_of(kref, 130 struct nfs_io_completion, refcount); 131 ioc->complete(ioc->data); 132 kfree(ioc); 133 } 134 135 static void nfs_io_completion_get(struct nfs_io_completion *ioc) 136 { 137 if (ioc != NULL) 138 kref_get(&ioc->refcount); 139 } 140 141 static void nfs_io_completion_put(struct nfs_io_completion *ioc) 142 { 143 if (ioc != NULL) 144 kref_put(&ioc->refcount, nfs_io_completion_release); 145 } 146 147 static void 148 nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode) 149 { 150 if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) { 151 kref_get(&req->wb_kref); 152 atomic_long_inc(&NFS_I(inode)->nrequests); 153 } 154 } 155 156 static void nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode) 157 { 158 if (test_and_clear_bit(PG_REMOVE, &req->wb_flags)) 159 nfs_page_set_inode_ref(req, inode); 160 } 161 162 /** 163 * nfs_folio_find_head_request - find head request associated with a folio 164 * @folio: pointer to folio 165 * 166 * must be called while holding the inode lock. 167 * 168 * returns matching head request with reference held, or NULL if not found. 169 */ 170 static struct nfs_page *nfs_folio_find_head_request(struct folio *folio) 171 { 172 struct address_space *mapping = folio->mapping; 173 struct nfs_page *req; 174 175 if (!folio_test_private(folio)) 176 return NULL; 177 spin_lock(&mapping->i_private_lock); 178 req = folio->private; 179 if (req) { 180 WARN_ON_ONCE(req->wb_head != req); 181 kref_get(&req->wb_kref); 182 } 183 spin_unlock(&mapping->i_private_lock); 184 return req; 185 } 186 187 /* Adjust the file length if we're writing beyond the end */ 188 static void nfs_grow_file(struct folio *folio, unsigned int offset, 189 unsigned int count) 190 { 191 struct inode *inode = folio->mapping->host; 192 loff_t end, i_size; 193 pgoff_t end_index; 194 195 spin_lock(&inode->i_lock); 196 i_size = i_size_read(inode); 197 end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio); 198 if (i_size > 0 && folio->index < end_index) 199 goto out; 200 end = folio_pos(folio) + (loff_t)offset + (loff_t)count; 201 if (i_size >= end) 202 goto out; 203 trace_nfs_size_grow(inode, end); 204 i_size_write(inode, end); 205 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE; 206 nfs_inc_stats(inode, NFSIOS_EXTENDWRITE); 207 out: 208 /* Atomically update timestamps if they are delegated to us. */ 209 nfs_update_delegated_mtime_locked(inode); 210 spin_unlock(&inode->i_lock); 211 nfs_fscache_invalidate(inode, 0); 212 } 213 214 /* A writeback failed: mark the page as bad, and invalidate the page cache */ 215 static void nfs_set_pageerror(struct address_space *mapping) 216 { 217 struct inode *inode = mapping->host; 218 219 nfs_zap_mapping(mapping->host, mapping); 220 /* Force file size revalidation */ 221 spin_lock(&inode->i_lock); 222 nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED | 223 NFS_INO_INVALID_CHANGE | 224 NFS_INO_INVALID_SIZE); 225 spin_unlock(&inode->i_lock); 226 } 227 228 static void nfs_mapping_set_error(struct folio *folio, int error) 229 { 230 struct address_space *mapping = folio->mapping; 231 232 filemap_set_wb_err(mapping, error); 233 if (mapping->host) 234 errseq_set(&mapping->host->i_sb->s_wb_err, 235 error == -ENOSPC ? -ENOSPC : -EIO); 236 nfs_set_pageerror(mapping); 237 } 238 239 /* 240 * nfs_page_covers_folio 241 * @req: struct nfs_page 242 * 243 * Return true if the request covers the whole folio. 244 * Note that the caller should ensure all subrequests have been joined 245 */ 246 static bool nfs_page_group_covers_page(struct nfs_page *req) 247 { 248 unsigned int len = nfs_folio_length(nfs_page_to_folio(req)); 249 250 return req->wb_pgbase == 0 && req->wb_bytes == len; 251 } 252 253 /* We can set the PG_uptodate flag if we see that a write request 254 * covers the full page. 255 */ 256 static void nfs_mark_uptodate(struct nfs_page *req) 257 { 258 struct folio *folio = nfs_page_to_folio(req); 259 260 if (folio_test_uptodate(folio)) 261 return; 262 if (!nfs_page_group_covers_page(req)) 263 return; 264 folio_mark_uptodate(folio); 265 } 266 267 static int wb_priority(struct writeback_control *wbc) 268 { 269 int ret = 0; 270 271 if (wbc->sync_mode == WB_SYNC_ALL) 272 ret = FLUSH_COND_STABLE; 273 return ret; 274 } 275 276 /* 277 * NFS congestion control 278 */ 279 280 int nfs_congestion_kb; 281 282 #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10)) 283 #define NFS_CONGESTION_OFF_THRESH \ 284 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2)) 285 286 static void nfs_folio_set_writeback(struct folio *folio) 287 { 288 struct nfs_server *nfss = NFS_SERVER(folio->mapping->host); 289 290 folio_start_writeback(folio); 291 if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) 292 nfss->write_congested = 1; 293 } 294 295 static void nfs_folio_end_writeback(struct folio *folio) 296 { 297 struct nfs_server *nfss = NFS_SERVER(folio->mapping->host); 298 299 folio_end_writeback_no_dropbehind(folio); 300 if (atomic_long_dec_return(&nfss->writeback) < 301 NFS_CONGESTION_OFF_THRESH) { 302 nfss->write_congested = 0; 303 wake_up_all(&nfss->write_congestion_wait); 304 } 305 } 306 307 static void nfs_page_end_writeback(struct nfs_page *req) 308 { 309 if (nfs_page_group_sync_on_bit(req, PG_WB_END)) { 310 nfs_unlock_request(req); 311 nfs_folio_end_writeback(nfs_page_to_folio(req)); 312 } else 313 nfs_unlock_request(req); 314 } 315 316 /* 317 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests 318 * 319 * @destroy_list - request list (using wb_this_page) terminated by @old_head 320 * @old_head - the old head of the list 321 * 322 * All subrequests must be locked and removed from all lists, so at this point 323 * they are only "active" in this function, and possibly in nfs_wait_on_request 324 * with a reference held by some other context. 325 */ 326 static void 327 nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list, 328 struct nfs_page *old_head, 329 struct inode *inode) 330 { 331 while (destroy_list) { 332 struct nfs_page *subreq = destroy_list; 333 334 destroy_list = (subreq->wb_this_page == old_head) ? 335 NULL : subreq->wb_this_page; 336 337 /* Note: lock subreq in order to change subreq->wb_head */ 338 nfs_page_set_headlock(subreq); 339 WARN_ON_ONCE(old_head != subreq->wb_head); 340 341 /* make sure old group is not used */ 342 subreq->wb_this_page = subreq; 343 subreq->wb_head = subreq; 344 345 clear_bit(PG_REMOVE, &subreq->wb_flags); 346 347 /* Note: races with nfs_page_group_destroy() */ 348 if (!kref_read(&subreq->wb_kref)) { 349 /* Check if we raced with nfs_page_group_destroy() */ 350 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { 351 nfs_page_clear_headlock(subreq); 352 nfs_free_request(subreq); 353 } else 354 nfs_page_clear_headlock(subreq); 355 continue; 356 } 357 nfs_page_clear_headlock(subreq); 358 359 nfs_release_request(old_head); 360 361 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { 362 nfs_release_request(subreq); 363 atomic_long_dec(&NFS_I(inode)->nrequests); 364 } 365 366 /* subreq is now totally disconnected from page group or any 367 * write / commit lists. last chance to wake any waiters */ 368 nfs_unlock_and_release_request(subreq); 369 } 370 } 371 372 /* 373 * nfs_join_page_group - destroy subrequests of the head req 374 * @head: the page used to lookup the "page group" of nfs_page structures 375 * @inode: Inode to which the request belongs. 376 * 377 * This function joins all sub requests to the head request by first 378 * locking all requests in the group, cancelling any pending operations 379 * and finally updating the head request to cover the whole range covered by 380 * the (former) group. All subrequests are removed from any write or commit 381 * lists, unlinked from the group and destroyed. 382 */ 383 void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo, 384 struct inode *inode) 385 { 386 struct nfs_page *subreq; 387 struct nfs_page *destroy_list = NULL; 388 unsigned int pgbase, off, bytes; 389 390 pgbase = head->wb_pgbase; 391 bytes = head->wb_bytes; 392 off = head->wb_offset; 393 for (subreq = head->wb_this_page; subreq != head; 394 subreq = subreq->wb_this_page) { 395 /* Subrequests should always form a contiguous range */ 396 if (pgbase > subreq->wb_pgbase) { 397 off -= pgbase - subreq->wb_pgbase; 398 bytes += pgbase - subreq->wb_pgbase; 399 pgbase = subreq->wb_pgbase; 400 } 401 bytes = max(subreq->wb_pgbase + subreq->wb_bytes 402 - pgbase, bytes); 403 } 404 405 /* Set the head request's range to cover the former page group */ 406 head->wb_pgbase = pgbase; 407 head->wb_bytes = bytes; 408 head->wb_offset = off; 409 410 /* Now that all requests are locked, make sure they aren't on any list. 411 * Commit list removal accounting is done after locks are dropped */ 412 subreq = head; 413 do { 414 nfs_clear_request_commit(cinfo, subreq); 415 subreq = subreq->wb_this_page; 416 } while (subreq != head); 417 418 /* unlink subrequests from head, destroy them later */ 419 if (head->wb_this_page != head) { 420 /* destroy list will be terminated by head */ 421 destroy_list = head->wb_this_page; 422 head->wb_this_page = head; 423 } 424 425 nfs_destroy_unlinked_subrequests(destroy_list, head, inode); 426 } 427 428 /** 429 * nfs_wait_on_request - Wait for a request to complete. 430 * @req: request to wait upon. 431 * 432 * Interruptible by fatal signals only. 433 * The user is responsible for holding a count on the request. 434 */ 435 static int nfs_wait_on_request(struct nfs_page *req) 436 { 437 if (!test_bit(PG_BUSY, &req->wb_flags)) 438 return 0; 439 set_bit(PG_CONTENDED2, &req->wb_flags); 440 smp_mb__after_atomic(); 441 return wait_on_bit_io(&req->wb_flags, PG_BUSY, 442 TASK_UNINTERRUPTIBLE); 443 } 444 445 /* 446 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req 447 * @head: head request of page group, must be holding head lock 448 * @req: request that couldn't lock and needs to wait on the req bit lock 449 * 450 * This is a helper function for nfs_lock_and_join_requests 451 * returns 0 on success, < 0 on error. 452 */ 453 static void 454 nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req) 455 { 456 struct nfs_page *tmp; 457 458 /* relinquish all the locks successfully grabbed this run */ 459 for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { 460 if (!kref_read(&tmp->wb_kref)) 461 continue; 462 nfs_unlock_and_release_request(tmp); 463 } 464 } 465 466 /* 467 * nfs_page_group_lock_subreq - try to lock a subrequest 468 * @head: head request of page group 469 * @subreq: request to lock 470 * 471 * This is a helper function for nfs_lock_and_join_requests which 472 * must be called with the head request and page group both locked. 473 * On error, it returns with the page group unlocked. 474 */ 475 static int 476 nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq) 477 { 478 int ret; 479 480 if (!kref_get_unless_zero(&subreq->wb_kref)) 481 return 0; 482 while (!nfs_lock_request(subreq)) { 483 nfs_page_group_unlock(head); 484 ret = nfs_wait_on_request(subreq); 485 if (!ret) 486 ret = nfs_page_group_lock(head); 487 if (ret < 0) { 488 nfs_unroll_locks(head, subreq); 489 nfs_release_request(subreq); 490 return ret; 491 } 492 } 493 return 0; 494 } 495 496 /* 497 * nfs_lock_and_join_requests - join all subreqs to the head req 498 * @folio: the folio used to lookup the "page group" of nfs_page structures 499 * 500 * This function joins all sub requests to the head request by first 501 * locking all requests in the group, cancelling any pending operations 502 * and finally updating the head request to cover the whole range covered by 503 * the (former) group. All subrequests are removed from any write or commit 504 * lists, unlinked from the group and destroyed. 505 * 506 * Returns a locked, referenced pointer to the head request - which after 507 * this call is guaranteed to be the only request associated with the page. 508 * Returns NULL if no requests are found for @folio, or a ERR_PTR if an 509 * error was encountered. 510 */ 511 static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio) 512 { 513 struct inode *inode = folio->mapping->host; 514 struct nfs_page *head, *subreq; 515 struct nfs_commit_info cinfo; 516 int ret; 517 518 /* 519 * A reference is taken only on the head request which acts as a 520 * reference to the whole page group - the group will not be destroyed 521 * until the head reference is released. 522 */ 523 retry: 524 head = nfs_folio_find_head_request(folio); 525 if (!head) 526 return NULL; 527 528 while (!nfs_lock_request(head)) { 529 ret = nfs_wait_on_request(head); 530 if (ret < 0) { 531 nfs_release_request(head); 532 return ERR_PTR(ret); 533 } 534 } 535 536 ret = nfs_page_group_lock(head); 537 if (ret < 0) 538 goto out_unlock; 539 540 /* Ensure that nobody removed the request before we locked it */ 541 if (head != folio->private) { 542 nfs_page_group_unlock(head); 543 nfs_unlock_and_release_request(head); 544 goto retry; 545 } 546 547 nfs_cancel_remove_inode(head, inode); 548 549 /* lock each request in the page group */ 550 for (subreq = head->wb_this_page; 551 subreq != head; 552 subreq = subreq->wb_this_page) { 553 ret = nfs_page_group_lock_subreq(head, subreq); 554 if (ret < 0) 555 goto out_unlock; 556 } 557 558 nfs_page_group_unlock(head); 559 560 nfs_init_cinfo_from_inode(&cinfo, inode); 561 nfs_join_page_group(head, &cinfo, inode); 562 return head; 563 564 out_unlock: 565 nfs_unlock_and_release_request(head); 566 return ERR_PTR(ret); 567 } 568 569 static void nfs_write_error(struct nfs_page *req, int error) 570 { 571 trace_nfs_write_error(nfs_page_to_inode(req), req, error); 572 nfs_mapping_set_error(nfs_page_to_folio(req), error); 573 nfs_inode_remove_request(req); 574 nfs_page_end_writeback(req); 575 nfs_release_request(req); 576 } 577 578 /* 579 * Find an associated nfs write request, and prepare to flush it out 580 * May return an error if the user signalled nfs_wait_on_request(). 581 */ 582 static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc, 583 struct nfs_pageio_descriptor *pgio) 584 { 585 struct nfs_page *req; 586 int ret; 587 588 nfs_pageio_cond_complete(pgio, folio->index); 589 590 req = nfs_lock_and_join_requests(folio); 591 if (!req) 592 return 0; 593 if (IS_ERR(req)) 594 return PTR_ERR(req); 595 596 trace_nfs_do_writepage(req); 597 nfs_folio_set_writeback(folio); 598 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); 599 600 /* If there is a fatal error that covers this write, just exit */ 601 ret = pgio->pg_error; 602 if (nfs_error_is_fatal_on_server(ret)) 603 goto out_launder; 604 605 if (!nfs_pageio_add_request(pgio, req)) { 606 ret = pgio->pg_error; 607 /* 608 * Remove the problematic req upon fatal errors on the server 609 */ 610 if (nfs_error_is_fatal_on_server(ret)) 611 goto out_launder; 612 folio_redirty_for_writepage(wbc, folio); 613 nfs_redirty_request(req); 614 pgio->pg_error = 0; 615 return ret; 616 } 617 618 nfs_add_stats(folio->mapping->host, NFSIOS_WRITEPAGES, 1); 619 return 0; 620 621 out_launder: 622 nfs_write_error(req, ret); 623 return 0; 624 } 625 626 /* 627 * Write an mmapped page to the server. 628 */ 629 static int nfs_writepage_locked(struct folio *folio, 630 struct writeback_control *wbc) 631 { 632 struct nfs_pageio_descriptor pgio; 633 struct inode *inode = folio->mapping->host; 634 int err; 635 636 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE); 637 nfs_pageio_init_write(&pgio, inode, 0, false, 638 &nfs_async_write_completion_ops); 639 err = nfs_do_writepage(folio, wbc, &pgio); 640 pgio.pg_error = 0; 641 nfs_pageio_complete(&pgio); 642 return err; 643 } 644 645 static void nfs_io_completion_commit(void *inode) 646 { 647 nfs_commit_inode(inode, 0); 648 } 649 650 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) 651 { 652 struct inode *inode = mapping->host; 653 struct nfs_pageio_descriptor pgio; 654 struct nfs_io_completion *ioc = NULL; 655 unsigned int mntflags = NFS_SERVER(inode)->flags; 656 struct nfs_server *nfss = NFS_SERVER(inode); 657 int priority = 0; 658 int err; 659 660 trace_nfs_writepages(inode, wbc->range_start, wbc->range_end - wbc->range_start); 661 662 /* Wait with writeback until write congestion eases */ 663 if (wbc->sync_mode == WB_SYNC_NONE && nfss->write_congested) { 664 err = wait_event_killable(nfss->write_congestion_wait, 665 nfss->write_congested == 0); 666 if (err) 667 goto out_err; 668 } 669 670 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); 671 672 if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate || 673 wbc->for_background || wbc->for_sync) { 674 ioc = nfs_io_completion_alloc(GFP_KERNEL); 675 if (ioc) 676 nfs_io_completion_init(ioc, nfs_io_completion_commit, 677 inode); 678 priority = wb_priority(wbc); 679 } 680 681 do { 682 struct folio *folio = NULL; 683 684 nfs_pageio_init_write(&pgio, inode, priority, false, 685 &nfs_async_write_completion_ops); 686 pgio.pg_io_completion = ioc; 687 while ((folio = writeback_iter(mapping, wbc, folio, &err))) { 688 err = nfs_do_writepage(folio, wbc, &pgio); 689 folio_unlock(folio); 690 } 691 pgio.pg_error = 0; 692 nfs_pageio_complete(&pgio); 693 if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR) 694 break; 695 } while (err < 0 && !nfs_error_is_fatal(err)); 696 nfs_io_completion_put(ioc); 697 698 if (err > 0) 699 err = 0; 700 out_err: 701 trace_nfs_writepages_done(inode, wbc->range_start, wbc->range_end - wbc->range_start, err); 702 return err; 703 } 704 705 /* 706 * Insert a write request into an inode 707 */ 708 static void nfs_inode_add_request(struct nfs_page *req) 709 { 710 struct folio *folio = nfs_page_to_folio(req); 711 struct address_space *mapping = folio->mapping; 712 struct nfs_inode *nfsi = NFS_I(mapping->host); 713 714 WARN_ON_ONCE(req->wb_this_page != req); 715 716 /* Lock the request! */ 717 nfs_lock_request(req); 718 spin_lock(&mapping->i_private_lock); 719 set_bit(PG_MAPPED, &req->wb_flags); 720 folio_set_private(folio); 721 folio->private = req; 722 spin_unlock(&mapping->i_private_lock); 723 atomic_long_inc(&nfsi->nrequests); 724 /* this a head request for a page group - mark it as having an 725 * extra reference so sub groups can follow suit. 726 * This flag also informs pgio layer when to bump nrequests when 727 * adding subrequests. */ 728 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); 729 kref_get(&req->wb_kref); 730 } 731 732 /* 733 * Remove a write request from an inode 734 */ 735 static void nfs_inode_remove_request(struct nfs_page *req) 736 { 737 struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req)); 738 739 nfs_page_group_lock(req); 740 if (nfs_page_group_sync_on_bit_locked(req, PG_REMOVE)) { 741 struct folio *folio = nfs_page_to_folio(req->wb_head); 742 struct address_space *mapping = folio->mapping; 743 744 spin_lock(&mapping->i_private_lock); 745 if (likely(folio)) { 746 folio->private = NULL; 747 folio_clear_private(folio); 748 clear_bit(PG_MAPPED, &req->wb_head->wb_flags); 749 } 750 spin_unlock(&mapping->i_private_lock); 751 752 folio_end_dropbehind(folio); 753 } 754 nfs_page_group_unlock(req); 755 756 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) { 757 atomic_long_dec(&nfsi->nrequests); 758 nfs_release_request(req); 759 } 760 } 761 762 static void nfs_mark_request_dirty(struct nfs_page *req) 763 { 764 struct folio *folio = nfs_page_to_folio(req); 765 if (folio) 766 filemap_dirty_folio(folio_mapping(folio), folio); 767 } 768 769 /** 770 * nfs_request_add_commit_list_locked - add request to a commit list 771 * @req: pointer to a struct nfs_page 772 * @dst: commit list head 773 * @cinfo: holds list lock and accounting info 774 * 775 * This sets the PG_CLEAN bit, updates the cinfo count of 776 * number of outstanding requests requiring a commit as well as 777 * the MM page stats. 778 * 779 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the 780 * nfs_page lock. 781 */ 782 void 783 nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst, 784 struct nfs_commit_info *cinfo) 785 { 786 set_bit(PG_CLEAN, &req->wb_flags); 787 nfs_list_add_request(req, dst); 788 atomic_long_inc(&cinfo->mds->ncommit); 789 } 790 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked); 791 792 /** 793 * nfs_request_add_commit_list - add request to a commit list 794 * @req: pointer to a struct nfs_page 795 * @cinfo: holds list lock and accounting info 796 * 797 * This sets the PG_CLEAN bit, updates the cinfo count of 798 * number of outstanding requests requiring a commit as well as 799 * the MM page stats. 800 * 801 * The caller must _not_ hold the cinfo->lock, but must be 802 * holding the nfs_page lock. 803 */ 804 void 805 nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo) 806 { 807 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 808 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo); 809 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 810 nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo); 811 } 812 EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); 813 814 /** 815 * nfs_request_remove_commit_list - Remove request from a commit list 816 * @req: pointer to a nfs_page 817 * @cinfo: holds list lock and accounting info 818 * 819 * This clears the PG_CLEAN bit, and updates the cinfo's count of 820 * number of outstanding requests requiring a commit 821 * It does not update the MM page stats. 822 * 823 * The caller _must_ hold the cinfo->lock and the nfs_page lock. 824 */ 825 void 826 nfs_request_remove_commit_list(struct nfs_page *req, 827 struct nfs_commit_info *cinfo) 828 { 829 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) 830 return; 831 nfs_list_remove_request(req); 832 atomic_long_dec(&cinfo->mds->ncommit); 833 } 834 EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list); 835 836 static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo, 837 struct inode *inode) 838 { 839 cinfo->inode = inode; 840 cinfo->mds = &NFS_I(inode)->commit_info; 841 cinfo->ds = pnfs_get_ds_info(inode); 842 cinfo->dreq = NULL; 843 cinfo->completion_ops = &nfs_commit_completion_ops; 844 } 845 846 void nfs_init_cinfo(struct nfs_commit_info *cinfo, 847 struct inode *inode, 848 struct nfs_direct_req *dreq) 849 { 850 if (dreq) 851 nfs_init_cinfo_from_dreq(cinfo, dreq); 852 else 853 nfs_init_cinfo_from_inode(cinfo, inode); 854 } 855 EXPORT_SYMBOL_GPL(nfs_init_cinfo); 856 857 /* 858 * Add a request to the inode's commit list. 859 */ 860 void 861 nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg, 862 struct nfs_commit_info *cinfo, u32 ds_commit_idx) 863 { 864 if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx)) 865 return; 866 nfs_request_add_commit_list(req, cinfo); 867 } 868 869 static void nfs_folio_clear_commit(struct folio *folio) 870 { 871 if (folio) { 872 long nr = folio_nr_pages(folio); 873 874 node_stat_mod_folio(folio, NR_WRITEBACK, -nr); 875 bdi_wb_stat_mod(folio->mapping->host, WB_WRITEBACK, -nr); 876 } 877 } 878 879 /* Called holding the request lock on @req */ 880 static void nfs_clear_request_commit(struct nfs_commit_info *cinfo, 881 struct nfs_page *req) 882 { 883 if (test_bit(PG_CLEAN, &req->wb_flags)) { 884 struct nfs_open_context *ctx = nfs_req_openctx(req); 885 struct inode *inode = d_inode(ctx->dentry); 886 887 mutex_lock(&NFS_I(inode)->commit_mutex); 888 if (!pnfs_clear_request_commit(req, cinfo)) { 889 nfs_request_remove_commit_list(req, cinfo); 890 } 891 mutex_unlock(&NFS_I(inode)->commit_mutex); 892 nfs_folio_clear_commit(nfs_page_to_folio(req)); 893 } 894 } 895 896 int nfs_write_need_commit(struct nfs_pgio_header *hdr) 897 { 898 if (hdr->verf.committed == NFS_DATA_SYNC) 899 return hdr->lseg == NULL; 900 return hdr->verf.committed != NFS_FILE_SYNC; 901 } 902 903 static void nfs_async_write_init(struct nfs_pgio_header *hdr) 904 { 905 nfs_io_completion_get(hdr->io_completion); 906 } 907 908 static void nfs_write_completion(struct nfs_pgio_header *hdr) 909 { 910 struct nfs_commit_info cinfo; 911 unsigned long bytes = 0; 912 913 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 914 goto out; 915 nfs_init_cinfo_from_inode(&cinfo, hdr->inode); 916 while (!list_empty(&hdr->pages)) { 917 struct nfs_page *req = nfs_list_entry(hdr->pages.next); 918 919 bytes += req->wb_bytes; 920 nfs_list_remove_request(req); 921 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && 922 (hdr->good_bytes < bytes)) { 923 trace_nfs_comp_error(hdr->inode, req, hdr->error); 924 nfs_mapping_set_error(nfs_page_to_folio(req), 925 hdr->error); 926 goto remove_req; 927 } 928 if (nfs_write_need_commit(hdr)) { 929 struct nfs_open_context *ctx = 930 hdr->req->wb_lock_context->open_context; 931 932 /* Reset wb_nio, since the write was successful. */ 933 req->wb_nio = 0; 934 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); 935 clear_bit(NFS_CONTEXT_WRITE_SYNC, &ctx->flags); 936 nfs_mark_request_commit(req, hdr->lseg, &cinfo, 937 hdr->ds_commit_idx); 938 goto next; 939 } 940 remove_req: 941 nfs_inode_remove_request(req); 942 next: 943 nfs_page_end_writeback(req); 944 nfs_release_request(req); 945 } 946 out: 947 nfs_io_completion_put(hdr->io_completion); 948 hdr->release(hdr); 949 } 950 951 unsigned long 952 nfs_reqs_to_commit(struct nfs_commit_info *cinfo) 953 { 954 return atomic_long_read(&cinfo->mds->ncommit); 955 } 956 957 /* NFS_I(cinfo->inode)->commit_mutex held by caller */ 958 int 959 nfs_scan_commit_list(struct list_head *src, struct list_head *dst, 960 struct nfs_commit_info *cinfo, int max) 961 { 962 struct nfs_page *req, *tmp; 963 int ret = 0; 964 965 list_for_each_entry_safe(req, tmp, src, wb_list) { 966 kref_get(&req->wb_kref); 967 if (!nfs_lock_request(req)) { 968 nfs_release_request(req); 969 continue; 970 } 971 nfs_request_remove_commit_list(req, cinfo); 972 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); 973 nfs_list_add_request(req, dst); 974 ret++; 975 if ((ret == max) && !cinfo->dreq) 976 break; 977 cond_resched(); 978 } 979 return ret; 980 } 981 EXPORT_SYMBOL_GPL(nfs_scan_commit_list); 982 983 /* 984 * nfs_scan_commit - Scan an inode for commit requests 985 * @inode: NFS inode to scan 986 * @dst: mds destination list 987 * @cinfo: mds and ds lists of reqs ready to commit 988 * 989 * Moves requests from the inode's 'commit' request list. 990 * The requests are *not* checked to ensure that they form a contiguous set. 991 */ 992 int 993 nfs_scan_commit(struct inode *inode, struct list_head *dst, 994 struct nfs_commit_info *cinfo) 995 { 996 int ret = 0; 997 998 if (!atomic_long_read(&cinfo->mds->ncommit)) 999 return 0; 1000 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); 1001 if (atomic_long_read(&cinfo->mds->ncommit) > 0) { 1002 const int max = INT_MAX; 1003 1004 ret = nfs_scan_commit_list(&cinfo->mds->list, dst, 1005 cinfo, max); 1006 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret); 1007 } 1008 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); 1009 return ret; 1010 } 1011 1012 /* 1013 * Search for an existing write request, and attempt to update 1014 * it to reflect a new dirty region on a given page. 1015 * 1016 * If the attempt fails, then the existing request is flushed out 1017 * to disk. 1018 */ 1019 static struct nfs_page *nfs_try_to_update_request(struct folio *folio, 1020 unsigned int offset, 1021 unsigned int bytes) 1022 { 1023 struct nfs_page *req; 1024 unsigned int rqend; 1025 unsigned int end; 1026 int error; 1027 1028 trace_nfs_try_to_update_request(folio_inode(folio), offset, bytes); 1029 end = offset + bytes; 1030 1031 req = nfs_lock_and_join_requests(folio); 1032 if (IS_ERR_OR_NULL(req)) 1033 goto out; 1034 1035 rqend = req->wb_offset + req->wb_bytes; 1036 /* 1037 * Tell the caller to flush out the request if 1038 * the offsets are non-contiguous. 1039 * Note: nfs_flush_incompatible() will already 1040 * have flushed out requests having wrong owners. 1041 */ 1042 if (offset > rqend || end < req->wb_offset) 1043 goto out_flushme; 1044 1045 /* Okay, the request matches. Update the region */ 1046 if (offset < req->wb_offset) { 1047 req->wb_offset = offset; 1048 req->wb_pgbase = offset; 1049 } 1050 if (end > rqend) 1051 req->wb_bytes = end - req->wb_offset; 1052 else 1053 req->wb_bytes = rqend - req->wb_offset; 1054 req->wb_nio = 0; 1055 out: 1056 trace_nfs_try_to_update_request_done(folio_inode(folio), offset, bytes, 1057 PTR_ERR_OR_ZERO(req)); 1058 return req; 1059 out_flushme: 1060 /* 1061 * Note: we mark the request dirty here because 1062 * nfs_lock_and_join_requests() cannot preserve 1063 * commit flags, so we have to replay the write. 1064 */ 1065 nfs_mark_request_dirty(req); 1066 nfs_unlock_and_release_request(req); 1067 error = nfs_wb_folio(folio->mapping->host, folio); 1068 trace_nfs_try_to_update_request_done(folio_inode(folio), offset, bytes, error); 1069 return (error < 0) ? ERR_PTR(error) : NULL; 1070 } 1071 1072 /* 1073 * Try to update an existing write request, or create one if there is none. 1074 * 1075 * Note: Should always be called with the Page Lock held to prevent races 1076 * if we have to add a new request. Also assumes that the caller has 1077 * already called nfs_flush_incompatible() if necessary. 1078 */ 1079 static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx, 1080 struct folio *folio, 1081 unsigned int offset, 1082 unsigned int bytes) 1083 { 1084 struct nfs_page *req; 1085 1086 req = nfs_try_to_update_request(folio, offset, bytes); 1087 if (req != NULL) 1088 goto out; 1089 req = nfs_page_create_from_folio(ctx, folio, offset, bytes); 1090 if (IS_ERR(req)) 1091 goto out; 1092 nfs_inode_add_request(req); 1093 out: 1094 return req; 1095 } 1096 1097 static int nfs_writepage_setup(struct nfs_open_context *ctx, 1098 struct folio *folio, unsigned int offset, 1099 unsigned int count) 1100 { 1101 struct nfs_page *req; 1102 1103 req = nfs_setup_write_request(ctx, folio, offset, count); 1104 if (IS_ERR(req)) 1105 return PTR_ERR(req); 1106 trace_nfs_writepage_setup(req); 1107 /* Update file length */ 1108 nfs_grow_file(folio, offset, count); 1109 nfs_mark_uptodate(req); 1110 nfs_mark_request_dirty(req); 1111 nfs_unlock_and_release_request(req); 1112 return 0; 1113 } 1114 1115 int nfs_flush_incompatible(struct file *file, struct folio *folio) 1116 { 1117 struct nfs_open_context *ctx = nfs_file_open_context(file); 1118 struct nfs_lock_context *l_ctx; 1119 struct file_lock_context *flctx = locks_inode_context(file_inode(file)); 1120 struct nfs_page *req; 1121 int do_flush, status; 1122 /* 1123 * Look for a request corresponding to this page. If there 1124 * is one, and it belongs to another file, we flush it out 1125 * before we try to copy anything into the page. Do this 1126 * due to the lack of an ACCESS-type call in NFSv2. 1127 * Also do the same if we find a request from an existing 1128 * dropped page. 1129 */ 1130 do { 1131 req = nfs_folio_find_head_request(folio); 1132 if (req == NULL) 1133 return 0; 1134 l_ctx = req->wb_lock_context; 1135 do_flush = nfs_page_to_folio(req) != folio || 1136 !nfs_match_open_context(nfs_req_openctx(req), ctx); 1137 if (l_ctx && flctx && 1138 !(list_empty_careful(&flctx->flc_posix) && 1139 list_empty_careful(&flctx->flc_flock))) { 1140 do_flush |= l_ctx->lockowner != current->files; 1141 } 1142 nfs_release_request(req); 1143 if (!do_flush) 1144 return 0; 1145 status = nfs_wb_folio(folio->mapping->host, folio); 1146 } while (status == 0); 1147 return status; 1148 } 1149 1150 /* 1151 * Avoid buffered writes when a open context credential's key would 1152 * expire soon. 1153 * 1154 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL. 1155 * 1156 * Return 0 and set a credential flag which triggers the inode to flush 1157 * and performs NFS_FILE_SYNC writes if the key will expired within 1158 * RPC_KEY_EXPIRE_TIMEO. 1159 */ 1160 int 1161 nfs_key_timeout_notify(struct file *filp, struct inode *inode) 1162 { 1163 struct nfs_open_context *ctx = nfs_file_open_context(filp); 1164 1165 if (nfs_ctx_key_to_expire(ctx, inode) && 1166 !rcu_access_pointer(ctx->ll_cred)) 1167 /* Already expired! */ 1168 return -EACCES; 1169 return 0; 1170 } 1171 1172 /* 1173 * Test if the open context credential key is marked to expire soon. 1174 */ 1175 bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode) 1176 { 1177 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth; 1178 struct rpc_cred *cred, *new, *old = NULL; 1179 struct auth_cred acred = { 1180 .cred = ctx->cred, 1181 }; 1182 bool ret = false; 1183 1184 rcu_read_lock(); 1185 cred = rcu_dereference(ctx->ll_cred); 1186 if (cred && !(cred->cr_ops->crkey_timeout && 1187 cred->cr_ops->crkey_timeout(cred))) 1188 goto out; 1189 rcu_read_unlock(); 1190 1191 new = auth->au_ops->lookup_cred(auth, &acred, 0); 1192 if (new == cred) { 1193 put_rpccred(new); 1194 return true; 1195 } 1196 if (IS_ERR_OR_NULL(new)) { 1197 new = NULL; 1198 ret = true; 1199 } else if (new->cr_ops->crkey_timeout && 1200 new->cr_ops->crkey_timeout(new)) 1201 ret = true; 1202 1203 rcu_read_lock(); 1204 old = rcu_dereference_protected(xchg(&ctx->ll_cred, 1205 RCU_INITIALIZER(new)), 1); 1206 out: 1207 rcu_read_unlock(); 1208 put_rpccred(old); 1209 return ret; 1210 } 1211 1212 /* 1213 * If the page cache is marked as unsafe or invalid, then we can't rely on 1214 * the PageUptodate() flag. In this case, we will need to turn off 1215 * write optimisations that depend on the page contents being correct. 1216 */ 1217 static bool nfs_folio_write_uptodate(struct folio *folio, unsigned int pagelen) 1218 { 1219 struct inode *inode = folio->mapping->host; 1220 struct nfs_inode *nfsi = NFS_I(inode); 1221 1222 if (nfs_have_delegated_attributes(inode)) 1223 goto out; 1224 if (nfsi->cache_validity & 1225 (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE)) 1226 return false; 1227 smp_rmb(); 1228 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0) 1229 return false; 1230 out: 1231 if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0) 1232 return false; 1233 return folio_test_uptodate(folio) != 0; 1234 } 1235 1236 static bool 1237 is_whole_file_wrlock(struct file_lock *fl) 1238 { 1239 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && 1240 lock_is_write(fl); 1241 } 1242 1243 /* If we know the page is up to date, and we're not using byte range locks (or 1244 * if we have the whole file locked for writing), it may be more efficient to 1245 * extend the write to cover the entire page in order to avoid fragmentation 1246 * inefficiencies. 1247 * 1248 * If the file is opened for synchronous writes then we can just skip the rest 1249 * of the checks. 1250 */ 1251 static int nfs_can_extend_write(struct file *file, struct folio *folio, 1252 unsigned int pagelen) 1253 { 1254 struct inode *inode = file_inode(file); 1255 struct file_lock_context *flctx = locks_inode_context(inode); 1256 struct file_lock *fl; 1257 int ret; 1258 unsigned int mntflags = NFS_SERVER(inode)->flags; 1259 1260 if (mntflags & NFS_MOUNT_NO_ALIGNWRITE) 1261 return 0; 1262 if (file->f_flags & O_DSYNC) 1263 return 0; 1264 if (!nfs_folio_write_uptodate(folio, pagelen)) 1265 return 0; 1266 if (nfs_have_write_delegation(inode)) 1267 return 1; 1268 if (!flctx || (list_empty_careful(&flctx->flc_flock) && 1269 list_empty_careful(&flctx->flc_posix))) 1270 return 1; 1271 1272 /* Check to see if there are whole file write locks */ 1273 ret = 0; 1274 spin_lock(&flctx->flc_lock); 1275 if (!list_empty(&flctx->flc_posix)) { 1276 fl = list_first_entry(&flctx->flc_posix, struct file_lock, 1277 c.flc_list); 1278 if (is_whole_file_wrlock(fl)) 1279 ret = 1; 1280 } else if (!list_empty(&flctx->flc_flock)) { 1281 fl = list_first_entry(&flctx->flc_flock, struct file_lock, 1282 c.flc_list); 1283 if (lock_is_write(fl)) 1284 ret = 1; 1285 } 1286 spin_unlock(&flctx->flc_lock); 1287 return ret; 1288 } 1289 1290 /* 1291 * Update and possibly write a cached page of an NFS file. 1292 * 1293 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad 1294 * things with a page scheduled for an RPC call (e.g. invalidate it). 1295 */ 1296 int nfs_update_folio(struct file *file, struct folio *folio, 1297 unsigned int offset, unsigned int count) 1298 { 1299 struct nfs_open_context *ctx = nfs_file_open_context(file); 1300 struct address_space *mapping = folio->mapping; 1301 struct inode *inode = mapping->host; 1302 unsigned int pagelen = nfs_folio_length(folio); 1303 int status = 0; 1304 1305 nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE); 1306 1307 trace_nfs_update_folio(inode, offset, count); 1308 1309 dprintk("NFS: nfs_update_folio(%pD2 %d@%lld)\n", file, count, 1310 (long long)(folio_pos(folio) + offset)); 1311 1312 if (!count) 1313 goto out; 1314 1315 if (nfs_can_extend_write(file, folio, pagelen)) { 1316 unsigned int end = count + offset; 1317 1318 offset = round_down(offset, PAGE_SIZE); 1319 if (end < pagelen) 1320 end = min(round_up(end, PAGE_SIZE), pagelen); 1321 count = end - offset; 1322 } 1323 1324 status = nfs_writepage_setup(ctx, folio, offset, count); 1325 if (status < 0) 1326 nfs_set_pageerror(mapping); 1327 out: 1328 trace_nfs_update_folio_done(inode, offset, count, status); 1329 dprintk("NFS: nfs_update_folio returns %d (isize %lld)\n", 1330 status, (long long)i_size_read(inode)); 1331 return status; 1332 } 1333 1334 static int flush_task_priority(int how) 1335 { 1336 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { 1337 case FLUSH_HIGHPRI: 1338 return RPC_PRIORITY_HIGH; 1339 case FLUSH_LOWPRI: 1340 return RPC_PRIORITY_LOW; 1341 } 1342 return RPC_PRIORITY_NORMAL; 1343 } 1344 1345 static void nfs_initiate_write(struct nfs_pgio_header *hdr, 1346 struct rpc_message *msg, 1347 const struct nfs_rpc_ops *rpc_ops, 1348 struct rpc_task_setup *task_setup_data, int how) 1349 { 1350 int priority = flush_task_priority(how); 1351 1352 if (IS_SWAPFILE(hdr->inode)) 1353 task_setup_data->flags |= RPC_TASK_SWAPPER; 1354 task_setup_data->priority = priority; 1355 rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client); 1356 trace_nfs_initiate_write(hdr); 1357 } 1358 1359 /* If a nfs_flush_* function fails, it should remove reqs from @head and 1360 * call this on each, which will prepare them to be retried on next 1361 * writeback using standard nfs. 1362 */ 1363 static void nfs_redirty_request(struct nfs_page *req) 1364 { 1365 struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req)); 1366 1367 /* Bump the transmission count */ 1368 req->wb_nio++; 1369 nfs_mark_request_dirty(req); 1370 atomic_long_inc(&nfsi->redirtied_pages); 1371 nfs_page_end_writeback(req); 1372 nfs_release_request(req); 1373 } 1374 1375 static void nfs_async_write_error(struct list_head *head, int error) 1376 { 1377 struct nfs_page *req; 1378 1379 while (!list_empty(head)) { 1380 req = nfs_list_entry(head->next); 1381 nfs_list_remove_request(req); 1382 if (nfs_error_is_fatal_on_server(error)) 1383 nfs_write_error(req, error); 1384 else 1385 nfs_redirty_request(req); 1386 } 1387 } 1388 1389 static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr) 1390 { 1391 nfs_async_write_error(&hdr->pages, 0); 1392 } 1393 1394 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = { 1395 .init_hdr = nfs_async_write_init, 1396 .error_cleanup = nfs_async_write_error, 1397 .completion = nfs_write_completion, 1398 .reschedule_io = nfs_async_write_reschedule_io, 1399 }; 1400 1401 void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, 1402 struct inode *inode, int ioflags, bool force_mds, 1403 const struct nfs_pgio_completion_ops *compl_ops) 1404 { 1405 struct nfs_server *server = NFS_SERVER(inode); 1406 const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops; 1407 1408 #if IS_ENABLED(CONFIG_NFS_V4) 1409 if (server->pnfs_curr_ld && !force_mds) 1410 pg_ops = server->pnfs_curr_ld->pg_write_ops; 1411 #endif 1412 nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops, 1413 server->wsize, ioflags); 1414 } 1415 EXPORT_SYMBOL_GPL(nfs_pageio_init_write); 1416 1417 void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio) 1418 { 1419 struct nfs_pgio_mirror *mirror; 1420 1421 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) 1422 pgio->pg_ops->pg_cleanup(pgio); 1423 1424 pgio->pg_ops = &nfs_pgio_rw_ops; 1425 1426 nfs_pageio_stop_mirroring(pgio); 1427 1428 mirror = &pgio->pg_mirrors[0]; 1429 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; 1430 } 1431 EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds); 1432 1433 1434 void nfs_commit_prepare(struct rpc_task *task, void *calldata) 1435 { 1436 struct nfs_commit_data *data = calldata; 1437 1438 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); 1439 } 1440 1441 static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr, 1442 struct nfs_fattr *fattr) 1443 { 1444 struct nfs_pgio_args *argp = &hdr->args; 1445 struct nfs_pgio_res *resp = &hdr->res; 1446 u64 size = argp->offset + resp->count; 1447 1448 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) 1449 fattr->size = size; 1450 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { 1451 fattr->valid &= ~NFS_ATTR_FATTR_SIZE; 1452 return; 1453 } 1454 if (size != fattr->size) 1455 return; 1456 /* Set attribute barrier */ 1457 nfs_fattr_set_barrier(fattr); 1458 /* ...and update size */ 1459 fattr->valid |= NFS_ATTR_FATTR_SIZE; 1460 } 1461 1462 void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) 1463 { 1464 struct nfs_fattr *fattr = &hdr->fattr; 1465 struct inode *inode = hdr->inode; 1466 1467 if (nfs_have_delegated_mtime(inode)) { 1468 spin_lock(&inode->i_lock); 1469 nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS); 1470 spin_unlock(&inode->i_lock); 1471 return; 1472 } 1473 1474 spin_lock(&inode->i_lock); 1475 nfs_writeback_check_extend(hdr, fattr); 1476 nfs_post_op_update_inode_force_wcc_locked(inode, fattr); 1477 spin_unlock(&inode->i_lock); 1478 } 1479 EXPORT_SYMBOL_GPL(nfs_writeback_update_inode); 1480 1481 /* 1482 * This function is called when the WRITE call is complete. 1483 */ 1484 static int nfs_writeback_done(struct rpc_task *task, 1485 struct nfs_pgio_header *hdr, 1486 struct inode *inode) 1487 { 1488 int status; 1489 1490 /* 1491 * ->write_done will attempt to use post-op attributes to detect 1492 * conflicting writes by other clients. A strict interpretation 1493 * of close-to-open would allow us to continue caching even if 1494 * another writer had changed the file, but some applications 1495 * depend on tighter cache coherency when writing. 1496 */ 1497 status = NFS_PROTO(inode)->write_done(task, hdr); 1498 if (status != 0) 1499 return status; 1500 1501 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); 1502 trace_nfs_writeback_done(task, hdr); 1503 1504 if (task->tk_status >= 0) { 1505 enum nfs3_stable_how committed = hdr->res.verf->committed; 1506 1507 if (committed == NFS_UNSTABLE) { 1508 /* 1509 * We have some uncommitted data on the server at 1510 * this point, so ensure that we keep track of that 1511 * fact irrespective of what later writes do. 1512 */ 1513 set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags); 1514 } 1515 1516 if (committed < hdr->args.stable) { 1517 /* We tried a write call, but the server did not 1518 * commit data to stable storage even though we 1519 * requested it. 1520 * Note: There is a known bug in Tru64 < 5.0 in which 1521 * the server reports NFS_DATA_SYNC, but performs 1522 * NFS_FILE_SYNC. We therefore implement this checking 1523 * as a dprintk() in order to avoid filling syslog. 1524 */ 1525 static unsigned long complain; 1526 1527 /* Note this will print the MDS for a DS write */ 1528 if (time_before(complain, jiffies)) { 1529 dprintk("NFS: faulty NFS server %s:" 1530 " (committed = %d) != (stable = %d)\n", 1531 NFS_SERVER(inode)->nfs_client->cl_hostname, 1532 committed, hdr->args.stable); 1533 complain = jiffies + 300 * HZ; 1534 } 1535 } 1536 } 1537 1538 /* Deal with the suid/sgid bit corner case */ 1539 if (nfs_should_remove_suid(inode)) { 1540 spin_lock(&inode->i_lock); 1541 nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE 1542 | NFS_INO_REVAL_FORCED); 1543 spin_unlock(&inode->i_lock); 1544 } 1545 return 0; 1546 } 1547 1548 /* 1549 * This function is called when the WRITE call is complete. 1550 */ 1551 static void nfs_writeback_result(struct rpc_task *task, 1552 struct nfs_pgio_header *hdr) 1553 { 1554 struct nfs_pgio_args *argp = &hdr->args; 1555 struct nfs_pgio_res *resp = &hdr->res; 1556 1557 if (resp->count < argp->count && !list_empty(&hdr->pages)) { 1558 static unsigned long complain; 1559 struct nfs_open_context *ctx = 1560 hdr->req->wb_lock_context->open_context; 1561 1562 set_bit(NFS_CONTEXT_WRITE_SYNC, &ctx->flags); 1563 /* This a short write! */ 1564 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE); 1565 1566 /* Has the server at least made some progress? */ 1567 if (resp->count == 0) { 1568 if (time_before(complain, jiffies)) { 1569 printk(KERN_WARNING 1570 "NFS: Server wrote zero bytes, expected %u.\n", 1571 argp->count); 1572 complain = jiffies + 300 * HZ; 1573 } 1574 nfs_set_pgio_error(hdr, -EIO, argp->offset); 1575 task->tk_status = -EIO; 1576 return; 1577 } 1578 1579 /* For non rpc-based layout drivers, retry-through-MDS */ 1580 if (!task->tk_ops) { 1581 hdr->pnfs_error = -EAGAIN; 1582 return; 1583 } 1584 1585 /* Was this an NFSv2 write or an NFSv3 stable write? */ 1586 if (resp->verf->committed != NFS_UNSTABLE) { 1587 /* Resend from where the server left off */ 1588 hdr->mds_offset += resp->count; 1589 argp->offset += resp->count; 1590 argp->pgbase += resp->count; 1591 argp->count -= resp->count; 1592 } else { 1593 /* Resend as a stable write in order to avoid 1594 * headaches in the case of a server crash. 1595 */ 1596 argp->stable = NFS_FILE_SYNC; 1597 } 1598 resp->count = 0; 1599 resp->verf->committed = 0; 1600 rpc_restart_call_prepare(task); 1601 } 1602 } 1603 1604 static int wait_on_commit(struct nfs_mds_commit_info *cinfo) 1605 { 1606 return wait_var_event_killable(&cinfo->rpcs_out, 1607 !atomic_read(&cinfo->rpcs_out)); 1608 } 1609 1610 void nfs_commit_begin(struct nfs_mds_commit_info *cinfo) 1611 { 1612 atomic_inc(&cinfo->rpcs_out); 1613 } 1614 1615 bool nfs_commit_end(struct nfs_mds_commit_info *cinfo) 1616 { 1617 if (atomic_dec_and_test(&cinfo->rpcs_out)) { 1618 wake_up_var(&cinfo->rpcs_out); 1619 return true; 1620 } 1621 return false; 1622 } 1623 1624 void nfs_commitdata_release(struct nfs_commit_data *data) 1625 { 1626 put_nfs_open_context(data->context); 1627 nfs_commit_free(data); 1628 } 1629 EXPORT_SYMBOL_GPL(nfs_commitdata_release); 1630 1631 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, 1632 const struct nfs_rpc_ops *nfs_ops, 1633 const struct rpc_call_ops *call_ops, 1634 int how, int flags, 1635 struct nfsd_file *localio) 1636 { 1637 struct rpc_task *task; 1638 int priority = flush_task_priority(how); 1639 struct rpc_message msg = { 1640 .rpc_argp = &data->args, 1641 .rpc_resp = &data->res, 1642 .rpc_cred = data->cred, 1643 }; 1644 struct rpc_task_setup task_setup_data = { 1645 .task = &data->task, 1646 .rpc_client = clnt, 1647 .rpc_message = &msg, 1648 .callback_ops = call_ops, 1649 .callback_data = data, 1650 .workqueue = nfsiod_workqueue, 1651 .flags = RPC_TASK_ASYNC | flags, 1652 .priority = priority, 1653 }; 1654 1655 if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE)) 1656 task_setup_data.flags |= RPC_TASK_MOVEABLE; 1657 1658 /* Set up the initial task struct. */ 1659 nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client); 1660 trace_nfs_initiate_commit(data); 1661 1662 dprintk("NFS: initiated commit call\n"); 1663 1664 if (localio) 1665 return nfs_local_commit(localio, data, call_ops, how); 1666 1667 task = rpc_run_task(&task_setup_data); 1668 if (IS_ERR(task)) 1669 return PTR_ERR(task); 1670 if (how & FLUSH_SYNC) 1671 rpc_wait_for_completion_task(task); 1672 rpc_put_task(task); 1673 return 0; 1674 } 1675 EXPORT_SYMBOL_GPL(nfs_initiate_commit); 1676 1677 static loff_t nfs_get_lwb(struct list_head *head) 1678 { 1679 loff_t lwb = 0; 1680 struct nfs_page *req; 1681 1682 list_for_each_entry(req, head, wb_list) 1683 if (lwb < (req_offset(req) + req->wb_bytes)) 1684 lwb = req_offset(req) + req->wb_bytes; 1685 1686 return lwb; 1687 } 1688 1689 /* 1690 * Set up the argument/result storage required for the RPC call. 1691 */ 1692 void nfs_init_commit(struct nfs_commit_data *data, 1693 struct list_head *head, 1694 struct pnfs_layout_segment *lseg, 1695 struct nfs_commit_info *cinfo) 1696 { 1697 struct nfs_page *first; 1698 struct nfs_open_context *ctx; 1699 struct inode *inode; 1700 1701 /* Set up the RPC argument and reply structs 1702 * NB: take care not to mess about with data->commit et al. */ 1703 1704 if (head) 1705 list_splice_init(head, &data->pages); 1706 1707 first = nfs_list_entry(data->pages.next); 1708 ctx = nfs_req_openctx(first); 1709 inode = d_inode(ctx->dentry); 1710 1711 data->inode = inode; 1712 data->cred = ctx->cred; 1713 data->lseg = lseg; /* reference transferred */ 1714 /* only set lwb for pnfs commit */ 1715 if (lseg) 1716 data->lwb = nfs_get_lwb(&data->pages); 1717 data->mds_ops = &nfs_commit_ops; 1718 data->completion_ops = cinfo->completion_ops; 1719 data->dreq = cinfo->dreq; 1720 1721 data->args.fh = NFS_FH(data->inode); 1722 /* Note: we always request a commit of the entire inode */ 1723 data->args.offset = 0; 1724 data->args.count = 0; 1725 data->context = get_nfs_open_context(ctx); 1726 data->res.fattr = &data->fattr; 1727 data->res.verf = &data->verf; 1728 nfs_fattr_init(&data->fattr); 1729 nfs_commit_begin(cinfo->mds); 1730 } 1731 EXPORT_SYMBOL_GPL(nfs_init_commit); 1732 1733 void nfs_retry_commit(struct list_head *page_list, 1734 struct pnfs_layout_segment *lseg, 1735 struct nfs_commit_info *cinfo, 1736 u32 ds_commit_idx) 1737 { 1738 struct nfs_page *req; 1739 1740 while (!list_empty(page_list)) { 1741 req = nfs_list_entry(page_list->next); 1742 nfs_list_remove_request(req); 1743 nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx); 1744 nfs_folio_clear_commit(nfs_page_to_folio(req)); 1745 nfs_unlock_and_release_request(req); 1746 } 1747 } 1748 EXPORT_SYMBOL_GPL(nfs_retry_commit); 1749 1750 static void nfs_commit_resched_write(struct nfs_commit_info *cinfo, 1751 struct nfs_page *req) 1752 { 1753 struct folio *folio = nfs_page_to_folio(req); 1754 1755 filemap_dirty_folio(folio_mapping(folio), folio); 1756 } 1757 1758 /* 1759 * Commit dirty pages 1760 */ 1761 static int 1762 nfs_commit_list(struct inode *inode, struct list_head *head, int how, 1763 struct nfs_commit_info *cinfo) 1764 { 1765 struct nfs_commit_data *data; 1766 struct nfsd_file *localio; 1767 unsigned short task_flags = 0; 1768 1769 /* another commit raced with us */ 1770 if (list_empty(head)) 1771 return 0; 1772 1773 data = nfs_commitdata_alloc(); 1774 if (!data) { 1775 nfs_retry_commit(head, NULL, cinfo, -1); 1776 return -ENOMEM; 1777 } 1778 1779 /* Set up the argument struct */ 1780 nfs_init_commit(data, head, NULL, cinfo); 1781 if (NFS_SERVER(inode)->nfs_client->cl_minorversion) 1782 task_flags = RPC_TASK_MOVEABLE; 1783 1784 localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred, 1785 data->args.fh, &data->context->nfl, 1786 data->context->mode); 1787 return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), 1788 data->mds_ops, how, 1789 RPC_TASK_CRED_NOREF | task_flags, localio); 1790 } 1791 1792 /* 1793 * COMMIT call returned 1794 */ 1795 static void nfs_commit_done(struct rpc_task *task, void *calldata) 1796 { 1797 struct nfs_commit_data *data = calldata; 1798 1799 /* Call the NFS version-specific code */ 1800 NFS_PROTO(data->inode)->commit_done(task, data); 1801 trace_nfs_commit_done(task, data); 1802 } 1803 1804 static void nfs_commit_release_pages(struct nfs_commit_data *data) 1805 { 1806 const struct nfs_writeverf *verf = data->res.verf; 1807 struct nfs_page *req; 1808 int status = data->task.tk_status; 1809 struct nfs_commit_info cinfo; 1810 struct folio *folio; 1811 1812 while (!list_empty(&data->pages)) { 1813 req = nfs_list_entry(data->pages.next); 1814 nfs_list_remove_request(req); 1815 folio = nfs_page_to_folio(req); 1816 nfs_folio_clear_commit(folio); 1817 1818 dprintk("NFS: commit (%s/%llu %d@%lld)", 1819 nfs_req_openctx(req)->dentry->d_sb->s_id, 1820 (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)), 1821 req->wb_bytes, 1822 (long long)req_offset(req)); 1823 if (status < 0) { 1824 if (folio) { 1825 trace_nfs_commit_error(data->inode, req, 1826 status); 1827 nfs_mapping_set_error(folio, status); 1828 nfs_inode_remove_request(req); 1829 } 1830 dprintk(", error = %d\n", status); 1831 goto next; 1832 } 1833 1834 /* Okay, COMMIT succeeded, apparently. Check the verifier 1835 * returned by the server against all stored verfs. */ 1836 if (nfs_write_match_verf(verf, req)) { 1837 /* We have a match */ 1838 if (folio) 1839 nfs_inode_remove_request(req); 1840 dprintk(" OK\n"); 1841 goto next; 1842 } 1843 /* We have a mismatch. Write the page again */ 1844 dprintk(" mismatch\n"); 1845 nfs_mark_request_dirty(req); 1846 set_bit(NFS_CONTEXT_WRITE_SYNC, 1847 &req->wb_lock_context->open_context->flags); 1848 atomic_long_inc(&NFS_I(data->inode)->redirtied_pages); 1849 next: 1850 nfs_unlock_and_release_request(req); 1851 /* Latency breaker */ 1852 cond_resched(); 1853 } 1854 1855 nfs_init_cinfo(&cinfo, data->inode, data->dreq); 1856 nfs_commit_end(cinfo.mds); 1857 } 1858 1859 static void nfs_commit_release(void *calldata) 1860 { 1861 struct nfs_commit_data *data = calldata; 1862 1863 data->completion_ops->completion(data); 1864 nfs_commitdata_release(calldata); 1865 } 1866 1867 static const struct rpc_call_ops nfs_commit_ops = { 1868 .rpc_call_prepare = nfs_commit_prepare, 1869 .rpc_call_done = nfs_commit_done, 1870 .rpc_release = nfs_commit_release, 1871 }; 1872 1873 static const struct nfs_commit_completion_ops nfs_commit_completion_ops = { 1874 .completion = nfs_commit_release_pages, 1875 .resched_write = nfs_commit_resched_write, 1876 }; 1877 1878 int nfs_generic_commit_list(struct inode *inode, struct list_head *head, 1879 int how, struct nfs_commit_info *cinfo) 1880 { 1881 int status; 1882 1883 status = pnfs_commit_list(inode, head, how, cinfo); 1884 if (status == PNFS_NOT_ATTEMPTED) 1885 status = nfs_commit_list(inode, head, how, cinfo); 1886 return status; 1887 } 1888 1889 static int __nfs_commit_inode(struct inode *inode, int how, 1890 struct writeback_control *wbc) 1891 { 1892 LIST_HEAD(head); 1893 struct nfs_commit_info cinfo; 1894 int may_wait = how & FLUSH_SYNC; 1895 int ret, nscan; 1896 1897 how &= ~FLUSH_SYNC; 1898 nfs_init_cinfo_from_inode(&cinfo, inode); 1899 nfs_commit_begin(cinfo.mds); 1900 for (;;) { 1901 ret = nscan = nfs_scan_commit(inode, &head, &cinfo); 1902 if (ret <= 0) 1903 break; 1904 ret = nfs_generic_commit_list(inode, &head, how, &cinfo); 1905 if (ret < 0) 1906 break; 1907 ret = 0; 1908 if (wbc && wbc->sync_mode == WB_SYNC_NONE) { 1909 if (nscan < wbc->nr_to_write) 1910 wbc->nr_to_write -= nscan; 1911 else 1912 wbc->nr_to_write = 0; 1913 } 1914 if (nscan < INT_MAX) 1915 break; 1916 cond_resched(); 1917 } 1918 nfs_commit_end(cinfo.mds); 1919 if (ret || !may_wait) 1920 return ret; 1921 return wait_on_commit(cinfo.mds); 1922 } 1923 1924 int nfs_commit_inode(struct inode *inode, int how) 1925 { 1926 return __nfs_commit_inode(inode, how, NULL); 1927 } 1928 EXPORT_SYMBOL_GPL(nfs_commit_inode); 1929 1930 int nfs_write_inode(struct inode *inode, struct writeback_control *wbc) 1931 { 1932 struct nfs_inode *nfsi = NFS_I(inode); 1933 int flags = FLUSH_SYNC; 1934 int ret = 0; 1935 1936 if (wbc->sync_mode == WB_SYNC_NONE) { 1937 /* no commits means nothing needs to be done */ 1938 if (!atomic_long_read(&nfsi->commit_info.ncommit)) 1939 goto check_requests_outstanding; 1940 1941 /* Don't commit yet if this is a non-blocking flush and there 1942 * are a lot of outstanding writes for this mapping. 1943 */ 1944 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) 1945 goto out_mark_dirty; 1946 1947 /* don't wait for the COMMIT response */ 1948 flags = 0; 1949 } 1950 1951 ret = __nfs_commit_inode(inode, flags, wbc); 1952 if (!ret) { 1953 if (flags & FLUSH_SYNC) 1954 return 0; 1955 } else if (atomic_long_read(&nfsi->commit_info.ncommit)) 1956 goto out_mark_dirty; 1957 1958 check_requests_outstanding: 1959 if (!atomic_read(&nfsi->commit_info.rpcs_out)) 1960 return ret; 1961 out_mark_dirty: 1962 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 1963 return ret; 1964 } 1965 EXPORT_SYMBOL_GPL(nfs_write_inode); 1966 1967 /* 1968 * Wrapper for filemap_write_and_wait_range() 1969 * 1970 * Needed for pNFS in order to ensure data becomes visible to the 1971 * client. 1972 */ 1973 int nfs_filemap_write_and_wait_range(struct address_space *mapping, 1974 loff_t lstart, loff_t lend) 1975 { 1976 int ret; 1977 1978 ret = filemap_write_and_wait_range(mapping, lstart, lend); 1979 if (ret == 0) 1980 ret = pnfs_sync_inode(mapping->host, true); 1981 return ret; 1982 } 1983 EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range); 1984 1985 /* 1986 * flush the inode to disk. 1987 */ 1988 int nfs_wb_all(struct inode *inode) 1989 { 1990 int ret; 1991 1992 trace_nfs_writeback_inode_enter(inode); 1993 1994 ret = filemap_write_and_wait(inode->i_mapping); 1995 if (ret) 1996 goto out; 1997 ret = nfs_commit_inode(inode, FLUSH_SYNC); 1998 if (ret < 0) 1999 goto out; 2000 pnfs_sync_inode(inode, true); 2001 ret = 0; 2002 2003 out: 2004 trace_nfs_writeback_inode_exit(inode, ret); 2005 return ret; 2006 } 2007 EXPORT_SYMBOL_GPL(nfs_wb_all); 2008 2009 int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio) 2010 { 2011 struct nfs_page *req; 2012 int ret = 0; 2013 2014 folio_wait_writeback(folio); 2015 2016 /* blocking call to cancel all requests and join to a single (head) 2017 * request */ 2018 req = nfs_lock_and_join_requests(folio); 2019 2020 if (IS_ERR(req)) { 2021 ret = PTR_ERR(req); 2022 } else if (req) { 2023 /* all requests from this folio have been cancelled by 2024 * nfs_lock_and_join_requests, so just remove the head 2025 * request from the inode / page_private pointer and 2026 * release it */ 2027 nfs_inode_remove_request(req); 2028 nfs_unlock_and_release_request(req); 2029 folio_cancel_dirty(folio); 2030 } 2031 2032 return ret; 2033 } 2034 2035 /** 2036 * nfs_wb_folio_reclaim - Write back all requests on one page 2037 * @inode: pointer to page 2038 * @folio: pointer to folio 2039 * 2040 * Assumes that the folio has been locked by the caller 2041 */ 2042 int nfs_wb_folio_reclaim(struct inode *inode, struct folio *folio) 2043 { 2044 loff_t range_start = folio_pos(folio); 2045 size_t len = folio_size(folio); 2046 struct writeback_control wbc = { 2047 .sync_mode = WB_SYNC_ALL, 2048 .nr_to_write = 0, 2049 .range_start = range_start, 2050 .range_end = range_start + len - 1, 2051 .for_sync = 1, 2052 }; 2053 int ret; 2054 2055 if (folio_test_writeback(folio)) 2056 return -EBUSY; 2057 if (folio_clear_dirty_for_io(folio)) { 2058 trace_nfs_writeback_folio_reclaim(inode, range_start, len); 2059 ret = nfs_writepage_locked(folio, &wbc); 2060 trace_nfs_writeback_folio_reclaim_done(inode, range_start, len, 2061 ret); 2062 return ret; 2063 } 2064 nfs_commit_inode(inode, 0); 2065 return 0; 2066 } 2067 2068 /** 2069 * nfs_wb_folio - Write back all requests on one page 2070 * @inode: pointer to page 2071 * @folio: pointer to folio 2072 * 2073 * Assumes that the folio has been locked by the caller, and will 2074 * not unlock it. 2075 */ 2076 int nfs_wb_folio(struct inode *inode, struct folio *folio) 2077 { 2078 loff_t range_start = folio_pos(folio); 2079 size_t len = folio_size(folio); 2080 struct writeback_control wbc = { 2081 .sync_mode = WB_SYNC_ALL, 2082 .nr_to_write = 0, 2083 .range_start = range_start, 2084 .range_end = range_start + len - 1, 2085 }; 2086 int ret; 2087 2088 trace_nfs_writeback_folio(inode, range_start, len); 2089 2090 for (;;) { 2091 folio_wait_writeback(folio); 2092 if (folio_clear_dirty_for_io(folio)) { 2093 ret = nfs_writepage_locked(folio, &wbc); 2094 if (ret < 0) 2095 goto out_error; 2096 continue; 2097 } 2098 ret = 0; 2099 if (!folio_test_private(folio)) 2100 break; 2101 ret = nfs_commit_inode(inode, FLUSH_SYNC); 2102 if (ret < 0) 2103 goto out_error; 2104 } 2105 out_error: 2106 trace_nfs_writeback_folio_done(inode, range_start, len, ret); 2107 return ret; 2108 } 2109 2110 #ifdef CONFIG_MIGRATION 2111 int nfs_migrate_folio(struct address_space *mapping, struct folio *dst, 2112 struct folio *src, enum migrate_mode mode) 2113 { 2114 /* 2115 * If the private flag is set, the folio is currently associated with 2116 * an in-progress read or write request. Don't try to migrate it. 2117 * 2118 * FIXME: we could do this in principle, but we'll need a way to ensure 2119 * that we can safely release the inode reference while holding 2120 * the folio lock. 2121 */ 2122 if (folio_test_private(src)) { 2123 if (mode == MIGRATE_SYNC) 2124 nfs_wb_folio(src->mapping->host, src); 2125 if (folio_test_private(src)) 2126 return -EBUSY; 2127 } 2128 2129 if (folio_test_private_2(src)) { /* [DEPRECATED] */ 2130 if (mode == MIGRATE_ASYNC) 2131 return -EBUSY; 2132 folio_wait_private_2(src); 2133 } 2134 2135 return migrate_folio(mapping, dst, src, mode); 2136 } 2137 #endif 2138 2139 int __init nfs_init_writepagecache(void) 2140 { 2141 nfs_wdata_cachep = kmem_cache_create("nfs_write_data", 2142 sizeof(struct nfs_pgio_header), 2143 0, SLAB_HWCACHE_ALIGN, 2144 NULL); 2145 if (nfs_wdata_cachep == NULL) 2146 return -ENOMEM; 2147 2148 nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE, 2149 nfs_wdata_cachep); 2150 if (nfs_wdata_mempool == NULL) 2151 goto out_destroy_write_cache; 2152 2153 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data", 2154 sizeof(struct nfs_commit_data), 2155 0, SLAB_HWCACHE_ALIGN, 2156 NULL); 2157 if (nfs_cdata_cachep == NULL) 2158 goto out_destroy_write_mempool; 2159 2160 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, 2161 nfs_cdata_cachep); 2162 if (nfs_commit_mempool == NULL) 2163 goto out_destroy_commit_cache; 2164 2165 /* 2166 * NFS congestion size, scale with available memory. 2167 * 2168 * 64MB: 8192k 2169 * 128MB: 11585k 2170 * 256MB: 16384k 2171 * 512MB: 23170k 2172 * 1GB: 32768k 2173 * 2GB: 46340k 2174 * 4GB: 65536k 2175 * 8GB: 92681k 2176 * 16GB: 131072k 2177 * 2178 * This allows larger machines to have larger/more transfers. 2179 * Limit the default to 256M 2180 */ 2181 nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); 2182 if (nfs_congestion_kb > 256*1024) 2183 nfs_congestion_kb = 256*1024; 2184 2185 return 0; 2186 2187 out_destroy_commit_cache: 2188 kmem_cache_destroy(nfs_cdata_cachep); 2189 out_destroy_write_mempool: 2190 mempool_destroy(nfs_wdata_mempool); 2191 out_destroy_write_cache: 2192 kmem_cache_destroy(nfs_wdata_cachep); 2193 return -ENOMEM; 2194 } 2195 2196 void nfs_destroy_writepagecache(void) 2197 { 2198 mempool_destroy(nfs_commit_mempool); 2199 kmem_cache_destroy(nfs_cdata_cachep); 2200 mempool_destroy(nfs_wdata_mempool); 2201 kmem_cache_destroy(nfs_wdata_cachep); 2202 } 2203 2204 static const struct nfs_rw_ops nfs_rw_write_ops = { 2205 .rw_alloc_header = nfs_writehdr_alloc, 2206 .rw_free_header = nfs_writehdr_free, 2207 .rw_done = nfs_writeback_done, 2208 .rw_result = nfs_writeback_result, 2209 .rw_initiate = nfs_initiate_write, 2210 }; 2211