// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/fs/nfs/write.c
 *
 * Write file data over NFS.
 *
 * Copyright (C) 1996, 1997, Olaf Kirch <okir@monad.swb.de>
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/migrate.h>

#include <linux/sunrpc/clnt.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_mount.h>
#include <linux/nfs_page.h>
#include <linux/backing-dev.h>
#include <linux/export.h>
#include <linux/freezer.h>
#include <linux/wait.h>
#include <linux/iversion.h>
#include <linux/filelock.h>

#include <linux/uaccess.h>
#include <linux/sched/mm.h>

#include "delegation.h"
#include "internal.h"
#include "iostat.h"
#include "nfs4_fs.h"
#include "fscache.h"
#include "pnfs.h"

#include "nfstrace.h"

#define NFSDBG_FACILITY		NFSDBG_PAGECACHE

#define MIN_POOL_WRITE		(32)
#define MIN_POOL_COMMIT		(4)

struct nfs_io_completion {
	void (*complete)(void *data);
	void *data;
	struct kref refcount;
};

/*
 * Local function declarations
 */
static void nfs_redirty_request(struct nfs_page *req);
static const struct rpc_call_ops nfs_commit_ops;
static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
static const struct nfs_rw_ops nfs_rw_write_ops;
static void nfs_inode_remove_request(struct nfs_page *req);
static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
				     struct nfs_page *req);
static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode);

static struct kmem_cache *nfs_wdata_cachep;
static mempool_t *nfs_wdata_mempool;
static struct kmem_cache *nfs_cdata_cachep;
static mempool_t *nfs_commit_mempool;

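/*
 * Allocate commit data for a COMMIT RPC call. If the slab allocation
 * fails, fall back to the reserved mempool with GFP_NOWAIT so that
 * writeback can still make forward progress under memory pressure.
 */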
struct nfs_commit_data *nfs_commitdata_alloc(void)
{
	struct nfs_commit_data *p;

	p = kmem_cache_zalloc(nfs_cdata_cachep, nfs_io_gfp_mask());
	if (!p) {
		p = mempool_alloc(nfs_commit_mempool, GFP_NOWAIT);
		if (!p)
			return NULL;
		memset(p, 0, sizeof(*p));
	}
	INIT_LIST_HEAD(&p->pages);
	return p;
}
EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);

void nfs_commit_free(struct nfs_commit_data *p)
{
	mempool_free(p, nfs_commit_mempool);
}
EXPORT_SYMBOL_GPL(nfs_commit_free);

static struct nfs_pgio_header *nfs_writehdr_alloc(void)
{
	struct nfs_pgio_header *p;

	p = kmem_cache_zalloc(nfs_wdata_cachep, nfs_io_gfp_mask());
	if (!p) {
		p = mempool_alloc(nfs_wdata_mempool, GFP_NOWAIT);
		if (!p)
			return NULL;
		memset(p, 0, sizeof(*p));
	}
	p->rw_mode = FMODE_WRITE;
	return p;
}

static void nfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	mempool_free(hdr, nfs_wdata_mempool);
}

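/*
 * An nfs_io_completion ties together all the page I/O issued under one
 * over-arching writeback operation. The ->complete callback runs once
 * the final reference is dropped, i.e. when all of that I/O has finished.
 */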
static struct nfs_io_completion *nfs_io_completion_alloc(gfp_t gfp_flags)
{
	return kmalloc(sizeof(struct nfs_io_completion), gfp_flags);
}

static void nfs_io_completion_init(struct nfs_io_completion *ioc,
		void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	kref_init(&ioc->refcount);
}

static void nfs_io_completion_release(struct kref *kref)
{
	struct nfs_io_completion *ioc = container_of(kref,
			struct nfs_io_completion, refcount);
	ioc->complete(ioc->data);
	kfree(ioc);
}

static void nfs_io_completion_get(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_get(&ioc->refcount);
}

static void nfs_io_completion_put(struct nfs_io_completion *ioc)
{
	if (ioc != NULL)
		kref_put(&ioc->refcount, nfs_io_completion_release);
}

static void
nfs_page_set_inode_ref(struct nfs_page *req, struct inode *inode)
{
	if (!test_and_set_bit(PG_INODE_REF, &req->wb_flags)) {
		kref_get(&req->wb_kref);
		atomic_long_inc(&NFS_I(inode)->nrequests);
	}
}

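/*
 * If @req has been scheduled for removal from the inode, cancel that
 * removal and restore the inode reference so the request can be reused.
 */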
static int
nfs_cancel_remove_inode(struct nfs_page *req, struct inode *inode)
{
	int ret;

	if (!test_bit(PG_REMOVE, &req->wb_flags))
		return 0;
	ret = nfs_page_group_lock(req);
	if (ret)
		return ret;
	if (test_and_clear_bit(PG_REMOVE, &req->wb_flags))
		nfs_page_set_inode_ref(req, inode);
	nfs_page_group_unlock(req);
	return 0;
}

/**
 * nfs_folio_find_head_request - find head request associated with a folio
 * @folio: pointer to folio
 *
 * must be called while holding the inode lock.
 *
 * returns matching head request with reference held, or NULL if not found.
 */
static struct nfs_page *nfs_folio_find_head_request(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct nfs_page *req;

	if (!folio_test_private(folio))
		return NULL;
	spin_lock(&mapping->i_private_lock);
	req = folio->private;
	if (req) {
		WARN_ON_ONCE(req->wb_head != req);
		kref_get(&req->wb_kref);
	}
	spin_unlock(&mapping->i_private_lock);
	return req;
}

/* Adjust the file length if we're writing beyond the end */
static void nfs_grow_file(struct folio *folio, unsigned int offset,
			  unsigned int count)
{
	struct inode *inode = folio->mapping->host;
	loff_t end, i_size;
	pgoff_t end_index;

	spin_lock(&inode->i_lock);
	i_size = i_size_read(inode);
	end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio);
	if (i_size > 0 && folio->index < end_index)
		goto out;
	end = folio_pos(folio) + (loff_t)offset + (loff_t)count;
	if (i_size >= end)
		goto out;
	trace_nfs_size_grow(inode, end);
	i_size_write(inode, end);
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE;
	nfs_inc_stats(inode, NFSIOS_EXTENDWRITE);
out:
	/* Atomically update timestamps if they are delegated to us. */
	nfs_update_delegated_mtime_locked(inode);
	spin_unlock(&inode->i_lock);
	nfs_fscache_invalidate(inode, 0);
}

/* A writeback failed: mark the page as bad, and invalidate the page cache */
static void nfs_set_pageerror(struct address_space *mapping)
{
	struct inode *inode = mapping->host;

	nfs_zap_mapping(mapping->host, mapping);
	/* Force file size revalidation */
	spin_lock(&inode->i_lock);
	nfs_set_cache_invalid(inode, NFS_INO_REVAL_FORCED |
				     NFS_INO_INVALID_CHANGE |
				     NFS_INO_INVALID_SIZE);
	spin_unlock(&inode->i_lock);
}

static void nfs_mapping_set_error(struct folio *folio, int error)
{
	struct address_space *mapping = folio->mapping;

	filemap_set_wb_err(mapping, error);
	if (mapping->host)
		errseq_set(&mapping->host->i_sb->s_wb_err,
			   error == -ENOSPC ? -ENOSPC : -EIO);
	nfs_set_pageerror(mapping);
}

/*
 * nfs_page_group_search_locked
 * @head - head request of page group
 * @page_offset - offset into page
 *
 * Search page group with head @head to find a request that contains the
 * page offset @page_offset.
 *
 * Returns a pointer to the first matching nfs request, or NULL if no
 * match is found.
 *
 * Must be called with the page group lock held
 */
static struct nfs_page *
nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)
{
	struct nfs_page *req;

	req = head;
	do {
		if (page_offset >= req->wb_pgbase &&
		    page_offset < (req->wb_pgbase + req->wb_bytes))
			return req;

		req = req->wb_this_page;
	} while (req != head);

	return NULL;
}

/*
 * nfs_page_group_covers_page
 * @head - head request of page group
 *
 * Return true if the page group with head @head covers the whole page,
 * returns false otherwise
 */
static bool nfs_page_group_covers_page(struct nfs_page *req)
{
	unsigned int len = nfs_folio_length(nfs_page_to_folio(req));
	struct nfs_page *tmp;
	unsigned int pos = 0;

	nfs_page_group_lock(req);

	for (;;) {
		tmp = nfs_page_group_search_locked(req->wb_head, pos);
		if (!tmp)
			break;
		pos = tmp->wb_pgbase + tmp->wb_bytes;
	}

	nfs_page_group_unlock(req);
	return pos >= len;
}

/* We can set the PG_uptodate flag if we see that a write request
 * covers the full page.
 */
static void nfs_mark_uptodate(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);

	if (folio_test_uptodate(folio))
		return;
	if (!nfs_page_group_covers_page(req))
		return;
	folio_mark_uptodate(folio);
}

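/* Map the writeback_control sync mode onto an NFS flush priority. */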
static int wb_priority(struct writeback_control *wbc)
{
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_ALL)
		ret = FLUSH_COND_STABLE;
	return ret;
}

/*
 * NFS congestion control
 */

int nfs_congestion_kb;

#define NFS_CONGESTION_ON_THRESH	(nfs_congestion_kb >> (PAGE_SHIFT-10))
#define NFS_CONGESTION_OFF_THRESH	\
	(NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))

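/*
 * Mark a folio as under writeback and account it against the server's
 * congestion threshold: once the number of outstanding writeback pages
 * crosses NFS_CONGESTION_ON_THRESH, the server is flagged as
 * write-congested until it drains back below the off threshold
 * (see nfs_folio_end_writeback()).
 */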
static void nfs_folio_set_writeback(struct folio *folio)
{
	struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);

	folio_start_writeback(folio);
	if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH)
		nfss->write_congested = 1;
}

static void nfs_folio_end_writeback(struct folio *folio)
{
	struct nfs_server *nfss = NFS_SERVER(folio->mapping->host);

	folio_end_writeback(folio);
	if (atomic_long_dec_return(&nfss->writeback) <
	    NFS_CONGESTION_OFF_THRESH) {
		nfss->write_congested = 0;
		wake_up_all(&nfss->write_congestion_wait);
	}
}

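/*
 * Unlock @req and complete writeback for it. The folio itself is only
 * marked as no longer under writeback once every request in its page
 * group has passed through here.
 */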
static void nfs_page_end_writeback(struct nfs_page *req)
{
	if (nfs_page_group_sync_on_bit(req, PG_WB_END)) {
		nfs_unlock_request(req);
		nfs_folio_end_writeback(nfs_page_to_folio(req));
	} else
		nfs_unlock_request(req);
}

/*
 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
 *
 * @destroy_list - request list (using wb_this_page) terminated by @old_head
 * @old_head - the old head of the list
 *
 * All subrequests must be locked and removed from all lists, so at this point
 * they are only "active" in this function, and possibly in nfs_wait_on_request
 * with a reference held by some other context.
 */
static void
nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
				 struct nfs_page *old_head,
				 struct inode *inode)
{
	while (destroy_list) {
		struct nfs_page *subreq = destroy_list;

		destroy_list = (subreq->wb_this_page == old_head) ?
				   NULL : subreq->wb_this_page;

		/* Note: lock subreq in order to change subreq->wb_head */
		nfs_page_set_headlock(subreq);
		WARN_ON_ONCE(old_head != subreq->wb_head);

		/* make sure old group is not used */
		subreq->wb_this_page = subreq;
		subreq->wb_head = subreq;

		clear_bit(PG_REMOVE, &subreq->wb_flags);

		/* Note: races with nfs_page_group_destroy() */
		if (!kref_read(&subreq->wb_kref)) {
			/* Check if we raced with nfs_page_group_destroy() */
			if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
				nfs_page_clear_headlock(subreq);
				nfs_free_request(subreq);
			} else
				nfs_page_clear_headlock(subreq);
			continue;
		}
		nfs_page_clear_headlock(subreq);

		nfs_release_request(old_head);

		if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
			nfs_release_request(subreq);
			atomic_long_dec(&NFS_I(inode)->nrequests);
		}

		/* subreq is now totally disconnected from page group or any
		 * write / commit lists. last chance to wake any waiters */
		nfs_unlock_and_release_request(subreq);
	}
}

/*
 * nfs_join_page_group - destroy subrequests of the head req
 * @head: the page used to lookup the "page group" of nfs_page structures
 * @inode: Inode to which the request belongs.
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 */
void nfs_join_page_group(struct nfs_page *head, struct nfs_commit_info *cinfo,
			 struct inode *inode)
{
	struct nfs_page *subreq;
	struct nfs_page *destroy_list = NULL;
	unsigned int pgbase, off, bytes;

	pgbase = head->wb_pgbase;
	bytes = head->wb_bytes;
	off = head->wb_offset;
	for (subreq = head->wb_this_page; subreq != head;
	     subreq = subreq->wb_this_page) {
		/* Subrequests should always form a contiguous range */
		if (pgbase > subreq->wb_pgbase) {
			off -= pgbase - subreq->wb_pgbase;
			bytes += pgbase - subreq->wb_pgbase;
			pgbase = subreq->wb_pgbase;
		}
		bytes = max(subreq->wb_pgbase + subreq->wb_bytes
				- pgbase, bytes);
	}

	/* Set the head request's range to cover the former page group */
	head->wb_pgbase = pgbase;
	head->wb_bytes = bytes;
	head->wb_offset = off;

	/* Now that all requests are locked, make sure they aren't on any list.
	 * Commit list removal accounting is done after locks are dropped */
	subreq = head;
	do {
		nfs_clear_request_commit(cinfo, subreq);
		subreq = subreq->wb_this_page;
	} while (subreq != head);

	/* unlink subrequests from head, destroy them later */
	if (head->wb_this_page != head) {
		/* destroy list will be terminated by head */
		destroy_list = head->wb_this_page;
		head->wb_this_page = head;
	}

	nfs_destroy_unlinked_subrequests(destroy_list, head, inode);
}

/**
 * nfs_wait_on_request - Wait for a request to complete.
 * @req: request to wait upon.
 *
 * Waits uninterruptibly for the PG_BUSY bit to be released.
 * The user is responsible for holding a count on the request.
 */
static int nfs_wait_on_request(struct nfs_page *req)
{
	if (!test_bit(PG_BUSY, &req->wb_flags))
		return 0;
	set_bit(PG_CONTENDED2, &req->wb_flags);
	smp_mb__after_atomic();
	return wait_on_bit_io(&req->wb_flags, PG_BUSY,
			      TASK_UNINTERRUPTIBLE);
}
496
/*
 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
 * @head: head request of page group, must be holding head lock
 * @req: request that couldn't lock and needs to wait on the req bit lock
 *
 * This is a helper function for nfs_lock_and_join_requests.
 */
static void
nfs_unroll_locks(struct nfs_page *head, struct nfs_page *req)
{
	struct nfs_page *tmp;

	/* relinquish all the locks successfully grabbed this run */
	for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {
		if (!kref_read(&tmp->wb_kref))
			continue;
		nfs_unlock_and_release_request(tmp);
	}
}

/*
 * nfs_page_group_lock_subreq - try to lock a subrequest
 * @head: head request of page group
 * @subreq: request to lock
 *
 * This is a helper function for nfs_lock_and_join_requests which
 * must be called with the head request and page group both locked.
 * On error, it returns with the page group unlocked.
 */
static int
nfs_page_group_lock_subreq(struct nfs_page *head, struct nfs_page *subreq)
{
	int ret;

	if (!kref_get_unless_zero(&subreq->wb_kref))
		return 0;
	while (!nfs_lock_request(subreq)) {
		nfs_page_group_unlock(head);
		ret = nfs_wait_on_request(subreq);
		if (!ret)
			ret = nfs_page_group_lock(head);
		if (ret < 0) {
			nfs_unroll_locks(head, subreq);
			nfs_release_request(subreq);
			return ret;
		}
	}
	return 0;
}

/*
 * nfs_lock_and_join_requests - join all subreqs to the head req
 * @folio: the folio used to lookup the "page group" of nfs_page structures
 *
 * This function joins all sub requests to the head request by first
 * locking all requests in the group, cancelling any pending operations
 * and finally updating the head request to cover the whole range covered by
 * the (former) group.  All subrequests are removed from any write or commit
 * lists, unlinked from the group and destroyed.
 *
 * Returns a locked, referenced pointer to the head request - which after
 * this call is guaranteed to be the only request associated with the page.
 * Returns NULL if no requests are found for @folio, or an ERR_PTR if an
 * error was encountered.
 */
static struct nfs_page *nfs_lock_and_join_requests(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_page *head, *subreq;
	struct nfs_commit_info cinfo;
	int ret;

	/*
	 * A reference is taken only on the head request which acts as a
	 * reference to the whole page group - the group will not be destroyed
	 * until the head reference is released.
	 */
retry:
	head = nfs_folio_find_head_request(folio);
	if (!head)
		return NULL;

	while (!nfs_lock_request(head)) {
		ret = nfs_wait_on_request(head);
		if (ret < 0) {
			nfs_release_request(head);
			return ERR_PTR(ret);
		}
	}

	/* Ensure that nobody removed the request before we locked it */
	if (head != folio->private) {
		nfs_unlock_and_release_request(head);
		goto retry;
	}

	ret = nfs_cancel_remove_inode(head, inode);
	if (ret < 0)
		goto out_unlock;

	ret = nfs_page_group_lock(head);
	if (ret < 0)
		goto out_unlock;

	/* lock each request in the page group */
	for (subreq = head->wb_this_page;
	     subreq != head;
	     subreq = subreq->wb_this_page) {
		ret = nfs_page_group_lock_subreq(head, subreq);
		if (ret < 0)
			goto out_unlock;
	}

	nfs_page_group_unlock(head);

	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_join_page_group(head, &cinfo, inode);
	return head;

out_unlock:
	nfs_unlock_and_release_request(head);
	return ERR_PTR(ret);
}

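/* Fail a write request: record the error, then tear the request down. */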
static void nfs_write_error(struct nfs_page *req, int error)
{
	trace_nfs_write_error(nfs_page_to_inode(req), req, error);
	nfs_mapping_set_error(nfs_page_to_folio(req), error);
	nfs_inode_remove_request(req);
	nfs_page_end_writeback(req);
	nfs_release_request(req);
}

/*
 * Find an associated nfs write request, and prepare to flush it out
 * May return an error if the user signalled nfs_wait_on_request().
 */
static int nfs_do_writepage(struct folio *folio, struct writeback_control *wbc,
			    struct nfs_pageio_descriptor *pgio)
{
	struct nfs_page *req;
	int ret;

	nfs_pageio_cond_complete(pgio, folio->index);

	req = nfs_lock_and_join_requests(folio);
	if (!req)
		return 0;
	if (IS_ERR(req))
		return PTR_ERR(req);

	nfs_folio_set_writeback(folio);
	WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags));

	/* If there is a fatal error that covers this write, just exit */
	ret = pgio->pg_error;
	if (nfs_error_is_fatal_on_server(ret))
		goto out_launder;

	if (!nfs_pageio_add_request(pgio, req)) {
		ret = pgio->pg_error;
		/*
		 * Remove the problematic req upon fatal errors on the server
		 */
		if (nfs_error_is_fatal_on_server(ret))
			goto out_launder;
		folio_redirty_for_writepage(wbc, folio);
		nfs_redirty_request(req);
		pgio->pg_error = 0;
		return ret;
	}

	nfs_add_stats(folio->mapping->host, NFSIOS_WRITEPAGES, 1);
	return 0;

out_launder:
	nfs_write_error(req, ret);
	return 0;
}

/*
 * Write an mmapped page to the server.
 */
static int nfs_writepage_locked(struct folio *folio,
				struct writeback_control *wbc)
{
	struct nfs_pageio_descriptor pgio;
	struct inode *inode = folio->mapping->host;
	int err;

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
	nfs_pageio_init_write(&pgio, inode, 0, false,
			      &nfs_async_write_completion_ops);
	err = nfs_do_writepage(folio, wbc, &pgio);
	pgio.pg_error = 0;
	nfs_pageio_complete(&pgio);
	return err;
}

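/* Commit any remaining unstable writes once the enclosing writeback is done. */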
static void nfs_io_completion_commit(void *inode)
{
	nfs_commit_inode(inode, 0);
}

int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct nfs_pageio_descriptor pgio;
	struct nfs_io_completion *ioc = NULL;
	unsigned int mntflags = NFS_SERVER(inode)->flags;
	struct nfs_server *nfss = NFS_SERVER(inode);
	int priority = 0;
	int err;

	/* Wait with writeback until write congestion eases */
	if (wbc->sync_mode == WB_SYNC_NONE && nfss->write_congested) {
		err = wait_event_killable(nfss->write_congestion_wait,
					  nfss->write_congested == 0);
		if (err)
			return err;
	}

	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);

	if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate ||
	    wbc->for_background || wbc->for_sync) {
		ioc = nfs_io_completion_alloc(GFP_KERNEL);
		if (ioc)
			nfs_io_completion_init(ioc, nfs_io_completion_commit,
					       inode);
		priority = wb_priority(wbc);
	}

	do {
		struct folio *folio = NULL;

		nfs_pageio_init_write(&pgio, inode, priority, false,
				      &nfs_async_write_completion_ops);
		pgio.pg_io_completion = ioc;
		while ((folio = writeback_iter(mapping, wbc, folio, &err))) {
			err = nfs_do_writepage(folio, wbc, &pgio);
			folio_unlock(folio);
		}
		pgio.pg_error = 0;
		nfs_pageio_complete(&pgio);
		if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR)
			break;
	} while (err < 0 && !nfs_error_is_fatal(err));
	nfs_io_completion_put(ioc);

	if (err < 0)
		goto out_err;
	return 0;
out_err:
	return err;
}

/*
 * Insert a write request into an inode
 */
static void nfs_inode_add_request(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	struct address_space *mapping = folio->mapping;
	struct nfs_inode *nfsi = NFS_I(mapping->host);

	WARN_ON_ONCE(req->wb_this_page != req);

	/* Lock the request! */
	nfs_lock_request(req);
	spin_lock(&mapping->i_private_lock);
	set_bit(PG_MAPPED, &req->wb_flags);
	folio_set_private(folio);
	folio->private = req;
	spin_unlock(&mapping->i_private_lock);
	atomic_long_inc(&nfsi->nrequests);
	/* this is a head request for a page group - mark it as having an
	 * extra reference so sub groups can follow suit.
	 * This flag also informs pgio layer when to bump nrequests when
	 * adding subrequests. */
	WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
	kref_get(&req->wb_kref);
}

/*
 * Remove a write request from an inode
 */
static void nfs_inode_remove_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));

	if (nfs_page_group_sync_on_bit(req, PG_REMOVE)) {
		struct folio *folio = nfs_page_to_folio(req->wb_head);
		struct address_space *mapping = folio->mapping;

		spin_lock(&mapping->i_private_lock);
		if (likely(folio)) {
			folio->private = NULL;
			folio_clear_private(folio);
			clear_bit(PG_MAPPED, &req->wb_head->wb_flags);
		}
		spin_unlock(&mapping->i_private_lock);
	}

	if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) {
		atomic_long_dec(&nfsi->nrequests);
		nfs_release_request(req);
	}
}

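/* Redirty the folio backing @req so a later writeback pass retries it. */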
static void nfs_mark_request_dirty(struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);
	if (folio)
		filemap_dirty_folio(folio_mapping(folio), folio);
}

/**
 * nfs_request_add_commit_list_locked - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @dst: commit list head
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
 * nfs_page lock.
 */
void
nfs_request_add_commit_list_locked(struct nfs_page *req, struct list_head *dst,
				   struct nfs_commit_info *cinfo)
{
	set_bit(PG_CLEAN, &req->wb_flags);
	nfs_list_add_request(req, dst);
	atomic_long_inc(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list_locked);

/**
 * nfs_request_add_commit_list - add request to a commit list
 * @req: pointer to a struct nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This sets the PG_CLEAN bit, updates the cinfo count of
 * number of outstanding requests requiring a commit as well as
 * the MM page stats.
 *
 * The caller must _not_ hold the cinfo->lock, but must be
 * holding the nfs_page lock.
 */
void
nfs_request_add_commit_list(struct nfs_page *req, struct nfs_commit_info *cinfo)
{
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo);
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	nfs_folio_mark_unstable(nfs_page_to_folio(req), cinfo);
}
EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);

/**
 * nfs_request_remove_commit_list - Remove request from a commit list
 * @req: pointer to a nfs_page
 * @cinfo: holds list lock and accounting info
 *
 * This clears the PG_CLEAN bit, and updates the cinfo's count of
 * number of outstanding requests requiring a commit
 * It does not update the MM page stats.
 *
 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
 */
void
nfs_request_remove_commit_list(struct nfs_page *req,
			       struct nfs_commit_info *cinfo)
{
	if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
		return;
	nfs_list_remove_request(req);
	atomic_long_dec(&cinfo->mds->ncommit);
}
EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);

static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
				      struct inode *inode)
{
	cinfo->inode = inode;
	cinfo->mds = &NFS_I(inode)->commit_info;
	cinfo->ds = pnfs_get_ds_info(inode);
	cinfo->dreq = NULL;
	cinfo->completion_ops = &nfs_commit_completion_ops;
}

void nfs_init_cinfo(struct nfs_commit_info *cinfo,
		    struct inode *inode,
		    struct nfs_direct_req *dreq)
{
	if (dreq)
		nfs_init_cinfo_from_dreq(cinfo, dreq);
	else
		nfs_init_cinfo_from_inode(cinfo, inode);
}
EXPORT_SYMBOL_GPL(nfs_init_cinfo);

/*
 * Add a request to the inode's commit list.
 */
void
nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
			struct nfs_commit_info *cinfo, u32 ds_commit_idx)
{
	if (pnfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx))
		return;
	nfs_request_add_commit_list(req, cinfo);
}

static void nfs_folio_clear_commit(struct folio *folio)
{
	if (folio) {
		long nr = folio_nr_pages(folio);

		node_stat_mod_folio(folio, NR_WRITEBACK, -nr);
		wb_stat_mod(&inode_to_bdi(folio->mapping->host)->wb,
			    WB_WRITEBACK, -nr);
	}
}

/* Called holding the request lock on @req */
static void nfs_clear_request_commit(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	if (test_bit(PG_CLEAN, &req->wb_flags)) {
		struct nfs_open_context *ctx = nfs_req_openctx(req);
		struct inode *inode = d_inode(ctx->dentry);

		mutex_lock(&NFS_I(inode)->commit_mutex);
		if (!pnfs_clear_request_commit(req, cinfo)) {
			nfs_request_remove_commit_list(req, cinfo);
		}
		mutex_unlock(&NFS_I(inode)->commit_mutex);
		nfs_folio_clear_commit(nfs_page_to_folio(req));
	}
}

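/*
 * A completed write still needs a COMMIT unless the server committed it
 * to stable storage (NFS_FILE_SYNC). NFS_DATA_SYNC writes only need a
 * separate commit when going through the MDS rather than a pNFS layout
 * segment.
 */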
int nfs_write_need_commit(struct nfs_pgio_header *hdr)
{
	if (hdr->verf.committed == NFS_DATA_SYNC)
		return hdr->lseg == NULL;
	return hdr->verf.committed != NFS_FILE_SYNC;
}

static void nfs_async_write_init(struct nfs_pgio_header *hdr)
{
	nfs_io_completion_get(hdr->io_completion);
}

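/*
 * Per-request completion for an asynchronous WRITE: hand successfully
 * written requests that still need a COMMIT over to the commit lists,
 * record any error, and tear everything else down.
 */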
static void nfs_write_completion(struct nfs_pgio_header *hdr)
{
	struct nfs_commit_info cinfo;
	unsigned long bytes = 0;

	if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
		goto out;
	nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
	while (!list_empty(&hdr->pages)) {
		struct nfs_page *req = nfs_list_entry(hdr->pages.next);

		bytes += req->wb_bytes;
		nfs_list_remove_request(req);
		if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
		    (hdr->good_bytes < bytes)) {
			trace_nfs_comp_error(hdr->inode, req, hdr->error);
			nfs_mapping_set_error(nfs_page_to_folio(req),
					      hdr->error);
			goto remove_req;
		}
		if (nfs_write_need_commit(hdr)) {
			/* Reset wb_nio, since the write was successful. */
			req->wb_nio = 0;
			memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf));
			nfs_mark_request_commit(req, hdr->lseg, &cinfo,
						hdr->pgio_mirror_idx);
			goto next;
		}
remove_req:
		nfs_inode_remove_request(req);
next:
		nfs_page_end_writeback(req);
		nfs_release_request(req);
	}
out:
	nfs_io_completion_put(hdr->io_completion);
	hdr->release(hdr);
}

unsigned long
nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
{
	return atomic_long_read(&cinfo->mds->ncommit);
}

/* NFS_I(cinfo->inode)->commit_mutex held by caller */
int
nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
		     struct nfs_commit_info *cinfo, int max)
{
	struct nfs_page *req, *tmp;
	int ret = 0;

	list_for_each_entry_safe(req, tmp, src, wb_list) {
		kref_get(&req->wb_kref);
		if (!nfs_lock_request(req)) {
			nfs_release_request(req);
			continue;
		}
		nfs_request_remove_commit_list(req, cinfo);
		clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
		nfs_list_add_request(req, dst);
		ret++;
		if ((ret == max) && !cinfo->dreq)
			break;
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_scan_commit_list);

/*
 * nfs_scan_commit - Scan an inode for commit requests
 * @inode: NFS inode to scan
 * @dst: mds destination list
 * @cinfo: mds and ds lists of reqs ready to commit
 *
 * Moves requests from the inode's 'commit' request list.
 * The requests are *not* checked to ensure that they form a contiguous set.
 */
int
nfs_scan_commit(struct inode *inode, struct list_head *dst,
		struct nfs_commit_info *cinfo)
{
	int ret = 0;

	if (!atomic_long_read(&cinfo->mds->ncommit))
		return 0;
	mutex_lock(&NFS_I(cinfo->inode)->commit_mutex);
	if (atomic_long_read(&cinfo->mds->ncommit) > 0) {
		const int max = INT_MAX;

		ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
					   cinfo, max);
		ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
	}
	mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex);
	return ret;
}

/*
 * Search for an existing write request, and attempt to update
 * it to reflect a new dirty region on a given page.
 *
 * If the attempt fails, then the existing request is flushed out
 * to disk.
 */
static struct nfs_page *nfs_try_to_update_request(struct folio *folio,
						  unsigned int offset,
						  unsigned int bytes)
{
	struct nfs_page *req;
	unsigned int rqend;
	unsigned int end;
	int error;

	end = offset + bytes;

	req = nfs_lock_and_join_requests(folio);
	if (IS_ERR_OR_NULL(req))
		return req;

	rqend = req->wb_offset + req->wb_bytes;
	/*
	 * Tell the caller to flush out the request if
	 * the offsets are non-contiguous.
	 * Note: nfs_flush_incompatible() will already
	 * have flushed out requests having wrong owners.
	 */
	if (offset > rqend || end < req->wb_offset)
		goto out_flushme;

	/* Okay, the request matches. Update the region */
	if (offset < req->wb_offset) {
		req->wb_offset = offset;
		req->wb_pgbase = offset;
	}
	if (end > rqend)
		req->wb_bytes = end - req->wb_offset;
	else
		req->wb_bytes = rqend - req->wb_offset;
	req->wb_nio = 0;
	return req;
out_flushme:
	/*
	 * Note: we mark the request dirty here because
	 * nfs_lock_and_join_requests() cannot preserve
	 * commit flags, so we have to replay the write.
	 */
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	error = nfs_wb_folio(folio->mapping->host, folio);
	return (error < 0) ? ERR_PTR(error) : NULL;
}

/*
 * Try to update an existing write request, or create one if there is none.
 *
 * Note: Should always be called with the Page Lock held to prevent races
 * if we have to add a new request. Also assumes that the caller has
 * already called nfs_flush_incompatible() if necessary.
 */
static struct nfs_page *nfs_setup_write_request(struct nfs_open_context *ctx,
						struct folio *folio,
						unsigned int offset,
						unsigned int bytes)
{
	struct nfs_page *req;

	req = nfs_try_to_update_request(folio, offset, bytes);
	if (req != NULL)
		goto out;
	req = nfs_page_create_from_folio(ctx, folio, offset, bytes);
	if (IS_ERR(req))
		goto out;
	nfs_inode_add_request(req);
out:
	return req;
}

static int nfs_writepage_setup(struct nfs_open_context *ctx,
			       struct folio *folio, unsigned int offset,
			       unsigned int count)
{
	struct nfs_page *req;

	req = nfs_setup_write_request(ctx, folio, offset, count);
	if (IS_ERR(req))
		return PTR_ERR(req);
	/* Update file length */
	nfs_grow_file(folio, offset, count);
	nfs_mark_uptodate(req);
	nfs_mark_request_dirty(req);
	nfs_unlock_and_release_request(req);
	return 0;
}

int nfs_flush_incompatible(struct file *file, struct folio *folio)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct nfs_lock_context *l_ctx;
	struct file_lock_context *flctx = locks_inode_context(file_inode(file));
	struct nfs_page *req;
	int do_flush, status;
	/*
	 * Look for a request corresponding to this page. If there
	 * is one, and it belongs to another file, we flush it out
	 * before we try to copy anything into the page. Do this
	 * due to the lack of an ACCESS-type call in NFSv2.
	 * Also do the same if we find a request from an existing
	 * dropped page.
	 */
	do {
		req = nfs_folio_find_head_request(folio);
		if (req == NULL)
			return 0;
		l_ctx = req->wb_lock_context;
		do_flush = nfs_page_to_folio(req) != folio ||
			   !nfs_match_open_context(nfs_req_openctx(req), ctx);
		if (l_ctx && flctx &&
		    !(list_empty_careful(&flctx->flc_posix) &&
		      list_empty_careful(&flctx->flc_flock))) {
			do_flush |= l_ctx->lockowner != current->files;
		}
		nfs_release_request(req);
		if (!do_flush)
			return 0;
		status = nfs_wb_folio(folio->mapping->host, folio);
	} while (status == 0);
	return status;
}

/*
 * Avoid buffered writes when an open context credential's key would
 * expire soon.
 *
 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
 *
 * Return 0 and set a credential flag which triggers the inode to flush
 * and performs NFS_FILE_SYNC writes if the key will expire within
 * RPC_KEY_EXPIRE_TIMEO.
 */
int
nfs_key_timeout_notify(struct file *filp, struct inode *inode)
{
	struct nfs_open_context *ctx = nfs_file_open_context(filp);

	if (nfs_ctx_key_to_expire(ctx, inode) &&
	    !rcu_access_pointer(ctx->ll_cred))
		/* Already expired! */
		return -EACCES;
	return 0;
}

/*
 * Test if the open context credential key is marked to expire soon.
 */
bool nfs_ctx_key_to_expire(struct nfs_open_context *ctx, struct inode *inode)
{
	struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth;
	struct rpc_cred *cred, *new, *old = NULL;
	struct auth_cred acred = {
		.cred = ctx->cred,
	};
	bool ret = false;

	rcu_read_lock();
	cred = rcu_dereference(ctx->ll_cred);
	if (cred && !(cred->cr_ops->crkey_timeout &&
		      cred->cr_ops->crkey_timeout(cred)))
		goto out;
	rcu_read_unlock();

	new = auth->au_ops->lookup_cred(auth, &acred, 0);
	if (new == cred) {
		put_rpccred(new);
		return true;
	}
	if (IS_ERR_OR_NULL(new)) {
		new = NULL;
		ret = true;
	} else if (new->cr_ops->crkey_timeout &&
		   new->cr_ops->crkey_timeout(new))
		ret = true;

	rcu_read_lock();
	old = rcu_dereference_protected(xchg(&ctx->ll_cred,
					     RCU_INITIALIZER(new)), 1);
out:
	rcu_read_unlock();
	put_rpccred(old);
	return ret;
}

/*
 * If the page cache is marked as unsafe or invalid, then we can't rely on
 * the PageUptodate() flag. In this case, we will need to turn off
 * write optimisations that depend on the page contents being correct.
 */
static bool nfs_folio_write_uptodate(struct folio *folio, unsigned int pagelen)
{
	struct inode *inode = folio->mapping->host;
	struct nfs_inode *nfsi = NFS_I(inode);

	if (nfs_have_delegated_attributes(inode))
		goto out;
	if (nfsi->cache_validity &
	    (NFS_INO_INVALID_CHANGE | NFS_INO_INVALID_SIZE))
		return false;
	smp_rmb();
	if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0)
		return false;
out:
	if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0)
		return false;
	return folio_test_uptodate(folio) != 0;
}

static bool
is_whole_file_wrlock(struct file_lock *fl)
{
	return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX &&
			lock_is_write(fl);
}

/* If we know the page is up to date, and we're not using byte range locks (or
 * if we have the whole file locked for writing), it may be more efficient to
 * extend the write to cover the entire page in order to avoid fragmentation
 * inefficiencies.
 *
 * If the file is opened for synchronous writes then we can just skip the rest
 * of the checks.
 */
static int nfs_can_extend_write(struct file *file, struct folio *folio,
				unsigned int pagelen)
{
	struct inode *inode = file_inode(file);
	struct file_lock_context *flctx = locks_inode_context(inode);
	struct file_lock *fl;
	int ret;
	unsigned int mntflags = NFS_SERVER(inode)->flags;

	if (mntflags & NFS_MOUNT_NO_ALIGNWRITE)
		return 0;
	if (file->f_flags & O_DSYNC)
		return 0;
	if (!nfs_folio_write_uptodate(folio, pagelen))
		return 0;
	if (nfs_have_write_delegation(inode))
		return 1;
	if (!flctx || (list_empty_careful(&flctx->flc_flock) &&
		       list_empty_careful(&flctx->flc_posix)))
		return 1;

	/* Check to see if there are whole file write locks */
	ret = 0;
	spin_lock(&flctx->flc_lock);
	if (!list_empty(&flctx->flc_posix)) {
		fl = list_first_entry(&flctx->flc_posix, struct file_lock,
				      c.flc_list);
		if (is_whole_file_wrlock(fl))
			ret = 1;
	} else if (!list_empty(&flctx->flc_flock)) {
		fl = list_first_entry(&flctx->flc_flock, struct file_lock,
				      c.flc_list);
		if (lock_is_write(fl))
			ret = 1;
	}
	spin_unlock(&flctx->flc_lock);
	return ret;
}

/*
 * Update and possibly write a cached page of an NFS file.
 *
 * XXX: Keep an eye on generic_file_read to make sure it doesn't do bad
 * things with a page scheduled for an RPC call (e.g. invalidate it).
 */
int nfs_update_folio(struct file *file, struct folio *folio,
		     unsigned int offset, unsigned int count)
{
	struct nfs_open_context *ctx = nfs_file_open_context(file);
	struct address_space *mapping = folio->mapping;
	struct inode *inode = mapping->host;
	unsigned int pagelen = nfs_folio_length(folio);
	int status = 0;

	nfs_inc_stats(inode, NFSIOS_VFSUPDATEPAGE);

	dprintk("NFS: nfs_update_folio(%pD2 %d@%lld)\n", file, count,
		(long long)(folio_pos(folio) + offset));

	if (!count)
		goto out;

	if (nfs_can_extend_write(file, folio, pagelen)) {
		unsigned int end = count + offset;

		offset = round_down(offset, PAGE_SIZE);
		if (end < pagelen)
			end = min(round_up(end, PAGE_SIZE), pagelen);
		count = end - offset;
	}

	status = nfs_writepage_setup(ctx, folio, offset, count);
	if (status < 0)
		nfs_set_pageerror(mapping);
out:
	dprintk("NFS: nfs_update_folio returns %d (isize %lld)\n",
		status, (long long)i_size_read(inode));
	return status;
}

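/* Translate FLUSH_* flags into an RPC scheduling priority. */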
static int flush_task_priority(int how)
{
	switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
		case FLUSH_HIGHPRI:
			return RPC_PRIORITY_HIGH;
		case FLUSH_LOWPRI:
			return RPC_PRIORITY_LOW;
	}
	return RPC_PRIORITY_NORMAL;
}

static void nfs_initiate_write(struct nfs_pgio_header *hdr,
			       struct rpc_message *msg,
			       const struct nfs_rpc_ops *rpc_ops,
			       struct rpc_task_setup *task_setup_data, int how)
{
	int priority = flush_task_priority(how);

	if (IS_SWAPFILE(hdr->inode))
		task_setup_data->flags |= RPC_TASK_SWAPPER;
	task_setup_data->priority = priority;
	rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client);
	trace_nfs_initiate_write(hdr);
}

/* If a nfs_flush_* function fails, it should remove reqs from @head and
 * call this on each, which will prepare them to be retried on next
 * writeback using standard nfs.
 */
static void nfs_redirty_request(struct nfs_page *req)
{
	struct nfs_inode *nfsi = NFS_I(nfs_page_to_inode(req));

	/* Bump the transmission count */
	req->wb_nio++;
	nfs_mark_request_dirty(req);
	atomic_long_inc(&nfsi->redirtied_pages);
	nfs_page_end_writeback(req);
	nfs_release_request(req);
}

static void nfs_async_write_error(struct list_head *head, int error)
{
	struct nfs_page *req;

	while (!list_empty(head)) {
		req = nfs_list_entry(head->next);
		nfs_list_remove_request(req);
		if (nfs_error_is_fatal_on_server(error))
			nfs_write_error(req, error);
		else
			nfs_redirty_request(req);
	}
}

static void nfs_async_write_reschedule_io(struct nfs_pgio_header *hdr)
{
	nfs_async_write_error(&hdr->pages, 0);
}

static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
	.init_hdr = nfs_async_write_init,
	.error_cleanup = nfs_async_write_error,
	.completion = nfs_write_completion,
	.reschedule_io = nfs_async_write_reschedule_io,
};

void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
			   struct inode *inode, int ioflags, bool force_mds,
			   const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	const struct nfs_pageio_ops *pg_ops = &nfs_pgio_rw_ops;

#ifdef CONFIG_NFS_V4_1
	if (server->pnfs_curr_ld && !force_mds)
		pg_ops = server->pnfs_curr_ld->pg_write_ops;
#endif
	nfs_pageio_init(pgio, inode, pg_ops, compl_ops, &nfs_rw_write_ops,
			server->wsize, ioflags);
}
EXPORT_SYMBOL_GPL(nfs_pageio_init_write);

void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
{
	struct nfs_pgio_mirror *mirror;

	if (pgio->pg_ops && pgio->pg_ops->pg_cleanup)
		pgio->pg_ops->pg_cleanup(pgio);

	pgio->pg_ops = &nfs_pgio_rw_ops;

	nfs_pageio_stop_mirroring(pgio);

	mirror = &pgio->pg_mirrors[0];
	mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize;
}
EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);


void nfs_commit_prepare(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
}

static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
				       struct nfs_fattr *fattr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;
	u64 size = argp->offset + resp->count;

	if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
		fattr->size = size;
	if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
		fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
		return;
	}
	if (size != fattr->size)
		return;
	/* Set attribute barrier */
	nfs_fattr_set_barrier(fattr);
	/* ...and update size */
	fattr->valid |= NFS_ATTR_FATTR_SIZE;
}

void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
{
	struct nfs_fattr *fattr = &hdr->fattr;
	struct inode *inode = hdr->inode;

	if (nfs_have_delegated_mtime(inode)) {
		spin_lock(&inode->i_lock);
		nfs_set_cache_invalid(inode, NFS_INO_INVALID_BLOCKS);
		spin_unlock(&inode->i_lock);
		return;
	}

	spin_lock(&inode->i_lock);
	nfs_writeback_check_extend(hdr, fattr);
	nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
	spin_unlock(&inode->i_lock);
}
EXPORT_SYMBOL_GPL(nfs_writeback_update_inode);

/*
 * This function is called when the WRITE call is complete.
 */
static int nfs_writeback_done(struct rpc_task *task,
			      struct nfs_pgio_header *hdr,
			      struct inode *inode)
{
	int status;

	/*
	 * ->write_done will attempt to use post-op attributes to detect
	 * conflicting writes by other clients.  A strict interpretation
	 * of close-to-open would allow us to continue caching even if
	 * another writer had changed the file, but some applications
	 * depend on tighter cache coherency when writing.
	 */
	status = NFS_PROTO(inode)->write_done(task, hdr);
	if (status != 0)
		return status;

	nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count);
	trace_nfs_writeback_done(task, hdr);

	if (task->tk_status >= 0) {
		enum nfs3_stable_how committed = hdr->res.verf->committed;

		if (committed == NFS_UNSTABLE) {
			/*
			 * We have some uncommitted data on the server at
			 * this point, so ensure that we keep track of that
			 * fact irrespective of what later writes do.
			 */
			set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags);
		}

		if (committed < hdr->args.stable) {
			/* We tried a write call, but the server did not
			 * commit data to stable storage even though we
			 * requested it.
			 * Note: There is a known bug in Tru64 < 5.0 in which
			 *	 the server reports NFS_DATA_SYNC, but performs
			 *	 NFS_FILE_SYNC. We therefore implement this checking
			 *	 as a dprintk() in order to avoid filling syslog.
			 */
			static unsigned long complain;

			/* Note this will print the MDS for a DS write */
			if (time_before(complain, jiffies)) {
				dprintk("NFS: faulty NFS server %s:"
					" (committed = %d) != (stable = %d)\n",
					NFS_SERVER(inode)->nfs_client->cl_hostname,
					committed, hdr->args.stable);
				complain = jiffies + 300 * HZ;
			}
		}
	}

	/* Deal with the suid/sgid bit corner case */
	if (nfs_should_remove_suid(inode)) {
		spin_lock(&inode->i_lock);
		nfs_set_cache_invalid(inode, NFS_INO_INVALID_MODE);
		spin_unlock(&inode->i_lock);
	}
	return 0;
}

/*
 * This function is called when the WRITE call is complete.
 */
static void nfs_writeback_result(struct rpc_task *task,
				 struct nfs_pgio_header *hdr)
{
	struct nfs_pgio_args *argp = &hdr->args;
	struct nfs_pgio_res *resp = &hdr->res;

	if (resp->count < argp->count) {
		static unsigned long complain;

		/* This is a short write! */
		nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE);

		/* Has the server at least made some progress? */
		if (resp->count == 0) {
			if (time_before(complain, jiffies)) {
				printk(KERN_WARNING
				       "NFS: Server wrote zero bytes, expected %u.\n",
				       argp->count);
				complain = jiffies + 300 * HZ;
			}
			nfs_set_pgio_error(hdr, -EIO, argp->offset);
			task->tk_status = -EIO;
			return;
		}

		/* For non rpc-based layout drivers, retry-through-MDS */
		if (!task->tk_ops) {
			hdr->pnfs_error = -EAGAIN;
			return;
		}

		/* Was this an NFSv2 write or an NFSv3 stable write? */
		if (resp->verf->committed != NFS_UNSTABLE) {
			/* Resend from where the server left off */
			hdr->mds_offset += resp->count;
			argp->offset += resp->count;
			argp->pgbase += resp->count;
			argp->count -= resp->count;
		} else {
			/* Resend as a stable write in order to avoid
			 * headaches in the case of a server crash.
			 */
			argp->stable = NFS_FILE_SYNC;
		}
		resp->count = 0;
		resp->verf->committed = 0;
		rpc_restart_call_prepare(task);
	}
}

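/* Wait killably until all outstanding COMMIT RPCs for this inode finish. */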
static int wait_on_commit(struct nfs_mds_commit_info *cinfo)
{
	return wait_var_event_killable(&cinfo->rpcs_out,
				       !atomic_read(&cinfo->rpcs_out));
}

void nfs_commit_begin(struct nfs_mds_commit_info *cinfo)
{
	atomic_inc(&cinfo->rpcs_out);
}

bool nfs_commit_end(struct nfs_mds_commit_info *cinfo)
{
	if (atomic_dec_and_test(&cinfo->rpcs_out)) {
		wake_up_var(&cinfo->rpcs_out);
		return true;
	}
	return false;
}

void nfs_commitdata_release(struct nfs_commit_data *data)
{
	put_nfs_open_context(data->context);
	nfs_commit_free(data);
}
EXPORT_SYMBOL_GPL(nfs_commitdata_release);

int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
			const struct nfs_rpc_ops *nfs_ops,
			const struct rpc_call_ops *call_ops,
			int how, int flags,
			struct nfsd_file *localio)
{
	struct rpc_task *task;
	int priority = flush_task_priority(how);
	struct rpc_message msg = {
		.rpc_argp = &data->args,
		.rpc_resp = &data->res,
		.rpc_cred = data->cred,
	};
	struct rpc_task_setup task_setup_data = {
		.task = &data->task,
		.rpc_client = clnt,
		.rpc_message = &msg,
		.callback_ops = call_ops,
		.callback_data = data,
		.workqueue = nfsiod_workqueue,
		.flags = RPC_TASK_ASYNC | flags,
		.priority = priority,
	};

	if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE))
		task_setup_data.flags |= RPC_TASK_MOVEABLE;

	/* Set up the initial task struct. */
	nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client);
	trace_nfs_initiate_commit(data);

	dprintk("NFS: initiated commit call\n");

	if (localio)
		return nfs_local_commit(localio, data, call_ops, how);

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	if (how & FLUSH_SYNC)
		rpc_wait_for_completion_task(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_initiate_commit);

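/* Compute the last written byte ("lwb") covered by the requests on @head. */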
static loff_t nfs_get_lwb(struct list_head *head)
{
	loff_t lwb = 0;
	struct nfs_page *req;

	list_for_each_entry(req, head, wb_list)
		if (lwb < (req_offset(req) + req->wb_bytes))
			lwb = req_offset(req) + req->wb_bytes;

	return lwb;
}

/*
 * Set up the argument/result storage required for the RPC call.
 */
void nfs_init_commit(struct nfs_commit_data *data,
		     struct list_head *head,
		     struct pnfs_layout_segment *lseg,
		     struct nfs_commit_info *cinfo)
{
	struct nfs_page *first;
	struct nfs_open_context *ctx;
	struct inode *inode;

	/* Set up the RPC argument and reply structs
	 * NB: take care not to mess about with data->commit et al. */

	if (head)
		list_splice_init(head, &data->pages);

	first = nfs_list_entry(data->pages.next);
	ctx = nfs_req_openctx(first);
	inode = d_inode(ctx->dentry);

	data->inode = inode;
	data->cred = ctx->cred;
	data->lseg = lseg; /* reference transferred */
	/* only set lwb for pnfs commit */
	if (lseg)
		data->lwb = nfs_get_lwb(&data->pages);
	data->mds_ops = &nfs_commit_ops;
	data->completion_ops = cinfo->completion_ops;
	data->dreq = cinfo->dreq;

	data->args.fh = NFS_FH(data->inode);
	/* Note: we always request a commit of the entire inode */
	data->args.offset = 0;
	data->args.count = 0;
	data->context = get_nfs_open_context(ctx);
	data->res.fattr = &data->fattr;
	data->res.verf = &data->verf;
	nfs_fattr_init(&data->fattr);
	nfs_commit_begin(cinfo->mds);
}
EXPORT_SYMBOL_GPL(nfs_init_commit);

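/*
 * Put each request on @page_list back on a commit list so that a later
 * commit attempt will pick it up again, e.g. after nfs_commitdata_alloc()
 * fails in nfs_commit_list() below.
 */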
void nfs_retry_commit(struct list_head *page_list,
		      struct pnfs_layout_segment *lseg,
		      struct nfs_commit_info *cinfo,
		      u32 ds_commit_idx)
{
	struct nfs_page *req;

	while (!list_empty(page_list)) {
		req = nfs_list_entry(page_list->next);
		nfs_list_remove_request(req);
		nfs_mark_request_commit(req, lseg, cinfo, ds_commit_idx);
		nfs_folio_clear_commit(nfs_page_to_folio(req));
		nfs_unlock_and_release_request(req);
	}
}
EXPORT_SYMBOL_GPL(nfs_retry_commit);

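/* Redirty the folio so that a later writeback pass will retry the write. */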
static void nfs_commit_resched_write(struct nfs_commit_info *cinfo,
				     struct nfs_page *req)
{
	struct folio *folio = nfs_page_to_folio(req);

	filemap_dirty_folio(folio_mapping(folio), folio);
}

/*
 * Commit dirty pages
 */
static int
nfs_commit_list(struct inode *inode, struct list_head *head, int how,
		struct nfs_commit_info *cinfo)
{
	struct nfs_commit_data *data;
	struct nfsd_file *localio;
	unsigned short task_flags = 0;

	/* another commit raced with us */
	if (list_empty(head))
		return 0;

	data = nfs_commitdata_alloc();
	if (!data) {
		nfs_retry_commit(head, NULL, cinfo, -1);
		return -ENOMEM;
	}

	/* Set up the argument struct */
	nfs_init_commit(data, head, NULL, cinfo);
	if (NFS_SERVER(inode)->nfs_client->cl_minorversion)
		task_flags = RPC_TASK_MOVEABLE;

	localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred,
				    data->args.fh, &data->context->nfl,
				    data->context->mode);
	return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode),
				   data->mds_ops, how,
				   RPC_TASK_CRED_NOREF | task_flags, localio);
}

/*
 * COMMIT call returned
 */
static void nfs_commit_done(struct rpc_task *task, void *calldata)
{
	struct nfs_commit_data *data = calldata;

	/* Call the NFS version-specific code */
	NFS_PROTO(data->inode)->commit_done(task, data);
	trace_nfs_commit_done(task, data);
}

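/*
 * Process the requests attached to a completed COMMIT.  A request is
 * removed from the inode only if the server's write verifier matches the
 * one stored when the data was written; on a mismatch the server may have
 * rebooted and lost the unstable data, so the page is redirtied and the
 * write is retried.
 */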
static void nfs_commit_release_pages(struct nfs_commit_data *data)
{
	const struct nfs_writeverf *verf = data->res.verf;
	struct nfs_page *req;
	int status = data->task.tk_status;
	struct nfs_commit_info cinfo;
	struct folio *folio;

	while (!list_empty(&data->pages)) {
		req = nfs_list_entry(data->pages.next);
		nfs_list_remove_request(req);
		folio = nfs_page_to_folio(req);
		nfs_folio_clear_commit(folio);

		dprintk("NFS: commit (%s/%llu %d@%lld)",
			nfs_req_openctx(req)->dentry->d_sb->s_id,
			(unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)),
			req->wb_bytes,
			(long long)req_offset(req));
		if (status < 0) {
			if (folio) {
				trace_nfs_commit_error(data->inode, req,
						       status);
				nfs_mapping_set_error(folio, status);
				nfs_inode_remove_request(req);
			}
			dprintk_cont(", error = %d\n", status);
			goto next;
		}

		/* Okay, COMMIT succeeded, apparently. Check the verifier
		 * returned by the server against all stored verfs. */
		if (nfs_write_match_verf(verf, req)) {
			/* We have a match */
			if (folio)
				nfs_inode_remove_request(req);
			dprintk_cont(" OK\n");
			goto next;
		}
		/* We have a mismatch. Write the page again */
		dprintk_cont(" mismatch\n");
		nfs_mark_request_dirty(req);
		atomic_long_inc(&NFS_I(data->inode)->redirtied_pages);
	next:
		nfs_unlock_and_release_request(req);
		/* Latency breaker */
		cond_resched();
	}

	nfs_init_cinfo(&cinfo, data->inode, data->dreq);
	nfs_commit_end(cinfo.mds);
}

static void nfs_commit_release(void *calldata)
{
	struct nfs_commit_data *data = calldata;

	data->completion_ops->completion(data);
	nfs_commitdata_release(calldata);
}

static const struct rpc_call_ops nfs_commit_ops = {
	.rpc_call_prepare = nfs_commit_prepare,
	.rpc_call_done = nfs_commit_done,
	.rpc_release = nfs_commit_release,
};

static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
	.completion = nfs_commit_release_pages,
	.resched_write = nfs_commit_resched_write,
};

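/*
 * Hand the commit list to the pNFS layout driver first; fall back to a
 * commit through the MDS if the driver does not attempt it.
 */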
int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
			    int how, struct nfs_commit_info *cinfo)
{
	int status;

	status = pnfs_commit_list(inode, head, how, cinfo);
	if (status == PNFS_NOT_ATTEMPTED)
		status = nfs_commit_list(inode, head, how, cinfo);
	return status;
}

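/*
 * Scan the inode for requests that are ready to be committed and send
 * them off in batches.  If FLUSH_SYNC was requested, wait for all
 * outstanding commit RPCs to finish before returning.
 */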
static int __nfs_commit_inode(struct inode *inode, int how,
			      struct writeback_control *wbc)
{
	LIST_HEAD(head);
	struct nfs_commit_info cinfo;
	int may_wait = how & FLUSH_SYNC;
	int ret, nscan;

	how &= ~FLUSH_SYNC;
	nfs_init_cinfo_from_inode(&cinfo, inode);
	nfs_commit_begin(cinfo.mds);
	for (;;) {
		ret = nscan = nfs_scan_commit(inode, &head, &cinfo);
		if (ret <= 0)
			break;
		ret = nfs_generic_commit_list(inode, &head, how, &cinfo);
		if (ret < 0)
			break;
		ret = 0;
		if (wbc && wbc->sync_mode == WB_SYNC_NONE) {
			if (nscan < wbc->nr_to_write)
				wbc->nr_to_write -= nscan;
			else
				wbc->nr_to_write = 0;
		}
		if (nscan < INT_MAX)
			break;
		cond_resched();
	}
	nfs_commit_end(cinfo.mds);
	if (ret || !may_wait)
		return ret;
	return wait_on_commit(cinfo.mds);
}

int nfs_commit_inode(struct inode *inode, int how)
{
	return __nfs_commit_inode(inode, how, NULL);
}
EXPORT_SYMBOL_GPL(nfs_commit_inode);

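/*
 * ->write_inode() method for the NFS superblock.  For a non-blocking
 * flush, only issue a COMMIT if nothing is still under writeback and
 * there are commits pending; otherwise leave the inode dirty so that
 * writeback revisits it once the outstanding WRITEs have drained.
 */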
int nfs_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	int flags = FLUSH_SYNC;
	int ret = 0;

	if (wbc->sync_mode == WB_SYNC_NONE) {
		/* no commits means nothing needs to be done */
		if (!atomic_long_read(&nfsi->commit_info.ncommit))
			goto check_requests_outstanding;

		/* Don't commit yet if this is a non-blocking flush and there
		 * are a lot of outstanding writes for this mapping.
		 */
		if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK))
			goto out_mark_dirty;

		/* don't wait for the COMMIT response */
		flags = 0;
	}

	ret = __nfs_commit_inode(inode, flags, wbc);
	if (!ret) {
		if (flags & FLUSH_SYNC)
			return 0;
	} else if (atomic_long_read(&nfsi->commit_info.ncommit))
		goto out_mark_dirty;

check_requests_outstanding:
	if (!atomic_read(&nfsi->commit_info.rpcs_out))
		return ret;
out_mark_dirty:
	__mark_inode_dirty(inode, I_DIRTY_DATASYNC);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_write_inode);

/*
 * Wrapper for filemap_write_and_wait_range()
 *
 * Needed for pNFS in order to ensure data becomes visible to the
 * client.
 */
int nfs_filemap_write_and_wait_range(struct address_space *mapping,
				     loff_t lstart, loff_t lend)
{
	int ret;

	ret = filemap_write_and_wait_range(mapping, lstart, lend);
	if (ret == 0)
		ret = pnfs_sync_inode(mapping->host, true);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_filemap_write_and_wait_range);

/*
 * Flush the inode to disk.
 */
int nfs_wb_all(struct inode *inode)
{
	int ret;

	trace_nfs_writeback_inode_enter(inode);

	ret = filemap_write_and_wait(inode->i_mapping);
	if (ret)
		goto out;
	ret = nfs_commit_inode(inode, FLUSH_SYNC);
	if (ret < 0)
		goto out;
	pnfs_sync_inode(inode, true);
	ret = 0;

out:
	trace_nfs_writeback_inode_exit(inode, ret);
	return ret;
}
EXPORT_SYMBOL_GPL(nfs_wb_all);

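/*
 * Cancel all requests on a folio that is being invalidated (e.g. on
 * truncate): the joined head request is removed from the inode and
 * released without being written back.
 */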
int nfs_wb_folio_cancel(struct inode *inode, struct folio *folio)
{
	struct nfs_page *req;
	int ret = 0;

	folio_wait_writeback(folio);

	/* blocking call to cancel all requests and join to a single (head)
	 * request */
	req = nfs_lock_and_join_requests(folio);

	if (IS_ERR(req)) {
		ret = PTR_ERR(req);
	} else if (req) {
		/* all requests from this folio have been cancelled by
		 * nfs_lock_and_join_requests, so just remove the head
		 * request from the inode / page_private pointer and
		 * release it */
		nfs_inode_remove_request(req);
		nfs_unlock_and_release_request(req);
	}

	return ret;
}

/**
 * nfs_wb_folio - Write back all requests on one folio
 * @inode: pointer to the inode
 * @folio: pointer to folio
 *
 * Assumes that the folio has been locked by the caller, and will
 * not unlock it.
 */
int nfs_wb_folio(struct inode *inode, struct folio *folio)
{
	loff_t range_start = folio_pos(folio);
	size_t len = folio_size(folio);
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = 0,
		.range_start = range_start,
		.range_end = range_start + len - 1,
	};
	int ret;

	trace_nfs_writeback_folio(inode, range_start, len);

	for (;;) {
		folio_wait_writeback(folio);
		if (folio_clear_dirty_for_io(folio)) {
			ret = nfs_writepage_locked(folio, &wbc);
			if (ret < 0)
				goto out_error;
			continue;
		}
		ret = 0;
		if (!folio_test_private(folio))
			break;
		ret = nfs_commit_inode(inode, FLUSH_SYNC);
		if (ret < 0)
			goto out_error;
	}
out_error:
	trace_nfs_writeback_folio_done(inode, range_start, len, ret);
	return ret;
}

#ifdef CONFIG_MIGRATION
int nfs_migrate_folio(struct address_space *mapping, struct folio *dst,
		      struct folio *src, enum migrate_mode mode)
{
	/*
	 * If the private flag is set, the folio is currently associated with
	 * an in-progress read or write request. Don't try to migrate it.
	 *
	 * FIXME: we could do this in principle, but we'll need a way to ensure
	 *        that we can safely release the inode reference while holding
	 *        the folio lock.
	 */
	if (folio_test_private(src))
		return -EBUSY;

	if (folio_test_private_2(src)) { /* [DEPRECATED] */
		if (mode == MIGRATE_ASYNC)
			return -EBUSY;
		folio_wait_private_2(src);
	}

	return migrate_folio(mapping, dst, src, mode);
}
#endif

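/*
 * Set up the slab caches and mempools used by the write and commit paths,
 * and pick a default value for the write congestion limit.
 */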
int __init nfs_init_writepagecache(void)
{
	nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
					     sizeof(struct nfs_pgio_header),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_wdata_cachep == NULL)
		return -ENOMEM;

	nfs_wdata_mempool = mempool_create_slab_pool(MIN_POOL_WRITE,
						     nfs_wdata_cachep);
	if (nfs_wdata_mempool == NULL)
		goto out_destroy_write_cache;

	nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
					     sizeof(struct nfs_commit_data),
					     0, SLAB_HWCACHE_ALIGN,
					     NULL);
	if (nfs_cdata_cachep == NULL)
		goto out_destroy_write_mempool;

	nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
						      nfs_cdata_cachep);
	if (nfs_commit_mempool == NULL)
		goto out_destroy_commit_cache;

	/*
	 * NFS congestion size, scale with available memory.
	 *
	 *  64MB:    8192k
	 * 128MB:   11585k
	 * 256MB:   16384k
	 * 512MB:   23170k
	 *   1GB:   32768k
	 *   2GB:   46340k
	 *   4GB:   65536k
	 *   8GB:   92681k
	 *  16GB:  131072k
	 *
	 * This allows larger machines to have larger/more transfers.
	 * Limit the default to 256M
	 */
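	/*
	 * Worked example, assuming 4k pages (PAGE_SHIFT == 12): with 1GB
	 * of RAM, totalram_pages() == 262144, and int_sqrt() of that is
	 * 512, so 16 * 512 = 8192 pages == 32768k, matching the table
	 * above.
	 */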
	nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10);
	if (nfs_congestion_kb > 256*1024)
		nfs_congestion_kb = 256*1024;

	return 0;

out_destroy_commit_cache:
	kmem_cache_destroy(nfs_cdata_cachep);
out_destroy_write_mempool:
	mempool_destroy(nfs_wdata_mempool);
out_destroy_write_cache:
	kmem_cache_destroy(nfs_wdata_cachep);
	return -ENOMEM;
}

void nfs_destroy_writepagecache(void)
{
	mempool_destroy(nfs_commit_mempool);
	kmem_cache_destroy(nfs_cdata_cachep);
	mempool_destroy(nfs_wdata_mempool);
	kmem_cache_destroy(nfs_wdata_cachep);
}

static const struct nfs_rw_ops nfs_rw_write_ops = {
	.rw_alloc_header = nfs_writehdr_alloc,
	.rw_free_header = nfs_writehdr_free,
	.rw_done = nfs_writeback_done,
	.rw_result = nfs_writeback_result,
	.rw_initiate = nfs_initiate_write,
};