Lines matching +full:data +full:-mapping in fs/nfs/write.c (the NFS client write path)
1 // SPDX-License-Identifier: GPL-2.0-only
5 * Write file data over NFS.
23 #include <linux/backing-dev.h>
48 void (*complete)(void *data);
49 void *data;
83 INIT_LIST_HEAD(&p->pages); in nfs_commitdata_alloc()
105 p->rw_mode = FMODE_WRITE; in nfs_writehdr_alloc()
120 void (*complete)(void *), void *data) in nfs_io_completion_init()
122 ioc->complete = complete; in nfs_io_completion_init()
123 ioc->data = data; in nfs_io_completion_init()
124 kref_init(&ioc->refcount); in nfs_io_completion_init()
131 ioc->complete(ioc->data); in nfs_io_completion_release()
138 kref_get(&ioc->refcount); in nfs_io_completion_get()
144 kref_put(&ioc->refcount, nfs_io_completion_release); in nfs_io_completion_put()
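/*
 * Illustrative sketch, not part of write.c: the nfs_io_completion
 * fragments above implement "run the completion callback when the last
 * reference is dropped" via the kernel's kref.  A minimal userspace
 * re-creation with C11 atomics; every name below is hypothetical.
 */
#include <stdatomic.h>
#include <stdio.h>

struct io_completion {
	void (*complete)(void *data);	/* called exactly once */
	void *data;			/* opaque callback argument */
	atomic_int refcount;
};

static void io_completion_init(struct io_completion *ioc,
			       void (*complete)(void *), void *data)
{
	ioc->complete = complete;
	ioc->data = data;
	atomic_init(&ioc->refcount, 1);	/* caller holds the first ref */
}

static void io_completion_get(struct io_completion *ioc)
{
	atomic_fetch_add(&ioc->refcount, 1);
}

static void io_completion_put(struct io_completion *ioc)
{
	/* last put runs the callback, mirroring kref_put() + release */
	if (atomic_fetch_sub(&ioc->refcount, 1) == 1)
		ioc->complete(ioc->data);
}

static void done(void *data)
{
	printf("all writes finished: %s\n", (const char *)data);
}

int main(void)
{
	struct io_completion ioc;

	io_completion_init(&ioc, done, "demo");
	io_completion_get(&ioc);	/* e.g. one write still in flight */
	io_completion_put(&ioc);	/* that write completes */
	io_completion_put(&ioc);	/* initial ref; callback fires here */
	return 0;
}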
148 * nfs_folio_find_head_request - find head request associated with a folio
157 struct address_space *mapping = folio->mapping; in nfs_folio_find_head_request()
162 spin_lock(&mapping->i_private_lock); in nfs_folio_find_head_request()
163 req = folio->private; in nfs_folio_find_head_request()
165 WARN_ON_ONCE(req->wb_head != req); in nfs_folio_find_head_request()
166 kref_get(&req->wb_kref); in nfs_folio_find_head_request()
168 spin_unlock(&mapping->i_private_lock); in nfs_folio_find_head_request()
176 struct inode *inode = folio->mapping->host; in nfs_grow_file()
180 spin_lock(&inode->i_lock); in nfs_grow_file()
182 end_index = ((i_size - 1) >> folio_shift(folio)) << folio_order(folio); in nfs_grow_file()
183 if (i_size > 0 && folio->index < end_index) in nfs_grow_file()
190 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_SIZE; in nfs_grow_file()
195 spin_unlock(&inode->i_lock); in nfs_grow_file()
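/*
 * Illustrative sketch, not from write.c: the end_index arithmetic in
 * nfs_grow_file() above.  For folios of 2^order pages, the index (in
 * PAGE_SIZE units, like folio->index) of the folio holding the last
 * byte of an i_size-byte file is
 * ((i_size - 1) >> (PAGE_SHIFT + order)) << order.  Assumes 4 KiB pages.
 */
#include <stdio.h>

#define PAGE_SHIFT 12

static unsigned long eof_folio_index(unsigned long long i_size,
				     unsigned int order)
{
	return (unsigned long)(((i_size - 1) >> (PAGE_SHIFT + order)) << order);
}

int main(void)
{
	/* 100 KiB file, order-2 (16 KiB) folios: EOF lies in the folio
	 * starting at page index 24, i.e. byte offset 96 KiB. */
	printf("%lu\n", eof_folio_index(100 * 1024, 2));	/* 24 */
	return 0;
}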
200 static void nfs_set_pageerror(struct address_space *mapping) in nfs_set_pageerror()
202 struct inode *inode = mapping->host; in nfs_set_pageerror()
204 nfs_zap_mapping(mapping->host, mapping); in nfs_set_pageerror()
206 spin_lock(&inode->i_lock); in nfs_set_pageerror()
210 spin_unlock(&inode->i_lock); in nfs_set_pageerror()
215 struct address_space *mapping = folio->mapping; in nfs_mapping_set_error()
217 filemap_set_wb_err(mapping, error); in nfs_mapping_set_error()
218 if (mapping->host) in nfs_mapping_set_error()
219 errseq_set(&mapping->host->i_sb->s_wb_err, in nfs_mapping_set_error()
220 error == -ENOSPC ? -ENOSPC : -EIO); in nfs_mapping_set_error()
221 nfs_set_pageerror(mapping); in nfs_mapping_set_error()
226 * @head - head request of page group
227 * @page_offset - offset into page
244 if (page_offset >= req->wb_pgbase && in nfs_page_group_search_locked()
245 page_offset < (req->wb_pgbase + req->wb_bytes)) in nfs_page_group_search_locked()
248 req = req->wb_this_page; in nfs_page_group_search_locked()
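/*
 * Illustrative sketch, not from write.c: page-group requests are linked
 * in a circle through wb_this_page, and the search above returns the
 * member whose [wb_pgbase, wb_pgbase + wb_bytes) range covers a given
 * offset.  A minimal userspace re-creation (hypothetical names):
 */
#include <stddef.h>
#include <stdio.h>

struct req {
	unsigned int pgbase, bytes;
	struct req *this_page;	/* circular: the tail points at the head */
};

static struct req *group_search(struct req *head, unsigned int offset)
{
	struct req *r = head;

	do {
		if (offset >= r->pgbase && offset < r->pgbase + r->bytes)
			return r;
		r = r->this_page;
	} while (r != head);
	return NULL;
}

int main(void)
{
	struct req a = { 0, 512, NULL }, b = { 512, 512, NULL };

	a.this_page = &b;
	b.this_page = &a;	/* close the circle */
	printf("%u\n", group_search(&a, 700)->pgbase);	/* prints 512 */
	return 0;
}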
256 * @head - head request of page group
270 tmp = nfs_page_group_search_locked(req->wb_head, pos); in nfs_page_group_covers_page()
273 pos = tmp->wb_pgbase + tmp->wb_bytes; in nfs_page_group_covers_page()
298 if (wbc->sync_mode == WB_SYNC_ALL) in wb_priority()
309 #define NFS_CONGESTION_ON_THRESH (nfs_congestion_kb >> (PAGE_SHIFT-10))
311 (NFS_CONGESTION_ON_THRESH - (NFS_CONGESTION_ON_THRESH >> 2))
315 struct nfs_server *nfss = NFS_SERVER(folio->mapping->host); in nfs_folio_set_writeback()
318 if (atomic_long_inc_return(&nfss->writeback) > NFS_CONGESTION_ON_THRESH) in nfs_folio_set_writeback()
319 nfss->write_congested = 1; in nfs_folio_set_writeback()
324 struct nfs_server *nfss = NFS_SERVER(folio->mapping->host); in nfs_folio_end_writeback()
327 if (atomic_long_dec_return(&nfss->writeback) < in nfs_folio_end_writeback()
329 nfss->write_congested = 0; in nfs_folio_end_writeback()
330 wake_up_all(&nfss->write_congestion_wait); in nfs_folio_end_writeback()
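/*
 * Illustrative sketch, not from write.c: the two thresholds above give
 * the congestion flag hysteresis.  nfs_congestion_kb is converted from
 * KiB to pages (>> (PAGE_SHIFT - 10)), and the "off" mark sits at 75%
 * of the "on" mark so the flag does not flap right at the boundary.
 * Assumes 4 KiB pages and an example 64 MiB budget.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SHIFT 12
static long nfs_congestion_kb = 65536;		/* 64 MiB */

#define ON_THRESH  (nfs_congestion_kb >> (PAGE_SHIFT - 10))
#define OFF_THRESH (ON_THRESH - (ON_THRESH >> 2))

static long writeback;		/* pages currently under writeback */
static bool congested;

static void folio_starts_writeback(void)
{
	if (++writeback > ON_THRESH)
		congested = true;
}

static void folio_ends_writeback(void)
{
	if (--writeback < OFF_THRESH)
		congested = false;	/* the kernel also wakes waiters */
}

int main(void)
{
	folio_starts_writeback();
	folio_ends_writeback();
	printf("on=%ld off=%ld congested=%d\n",
	       (long)ON_THRESH, (long)OFF_THRESH, congested);
	/* prints on=16384 off=12288 congested=0 */
	return 0;
}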
344 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
346 * @destroy_list - request list (using wb_this_page) terminated by @old_head
347 * @old_head - the old head of the list
361 destroy_list = (subreq->wb_this_page == old_head) ? in nfs_destroy_unlinked_subrequests()
362 NULL : subreq->wb_this_page; in nfs_destroy_unlinked_subrequests()
364 /* Note: lock subreq in order to change subreq->wb_head */ in nfs_destroy_unlinked_subrequests()
366 WARN_ON_ONCE(old_head != subreq->wb_head); in nfs_destroy_unlinked_subrequests()
369 subreq->wb_this_page = subreq; in nfs_destroy_unlinked_subrequests()
370 subreq->wb_head = subreq; in nfs_destroy_unlinked_subrequests()
372 clear_bit(PG_REMOVE, &subreq->wb_flags); in nfs_destroy_unlinked_subrequests()
375 if (!kref_read(&subreq->wb_kref)) { in nfs_destroy_unlinked_subrequests()
377 if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) { in nfs_destroy_unlinked_subrequests()
388 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) { in nfs_destroy_unlinked_subrequests()
390 atomic_long_dec(&NFS_I(inode)->nrequests); in nfs_destroy_unlinked_subrequests()
400 * nfs_join_page_group - destroy subrequests of the head req
417 pgbase = head->wb_pgbase; in nfs_join_page_group()
418 bytes = head->wb_bytes; in nfs_join_page_group()
419 off = head->wb_offset; in nfs_join_page_group()
420 for (subreq = head->wb_this_page; subreq != head; in nfs_join_page_group()
421 subreq = subreq->wb_this_page) { in nfs_join_page_group()
423 if (pgbase > subreq->wb_pgbase) { in nfs_join_page_group()
424 off -= pgbase - subreq->wb_pgbase; in nfs_join_page_group()
425 bytes += pgbase - subreq->wb_pgbase; in nfs_join_page_group()
426 pgbase = subreq->wb_pgbase; in nfs_join_page_group()
428 bytes = max(subreq->wb_pgbase + subreq->wb_bytes in nfs_join_page_group()
429 - pgbase, bytes); in nfs_join_page_group()
433 head->wb_pgbase = pgbase; in nfs_join_page_group()
434 head->wb_bytes = bytes; in nfs_join_page_group()
435 head->wb_offset = off; in nfs_join_page_group()
442 subreq = subreq->wb_this_page; in nfs_join_page_group()
446 if (head->wb_this_page != head) { in nfs_join_page_group()
448 destroy_list = head->wb_this_page; in nfs_join_page_group()
449 head->wb_this_page = head; in nfs_join_page_group()
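/*
 * Illustrative sketch, not from write.c: nfs_join_page_group() above
 * widens the head request until [pgbase, pgbase + bytes) covers every
 * subrequest in the group (the kernel also adjusts wb_offset, omitted
 * here).  The same interval merge in plain C:
 */
#include <stdio.h>

struct span { unsigned int pgbase, bytes; };

static struct span merge_group(const struct span *sub, int n)
{
	struct span s = sub[0];		/* start from the head request */

	for (int i = 1; i < n; i++) {
		if (s.pgbase > sub[i].pgbase) {
			s.bytes += s.pgbase - sub[i].pgbase;
			s.pgbase = sub[i].pgbase;
		}
		if (sub[i].pgbase + sub[i].bytes > s.pgbase + s.bytes)
			s.bytes = sub[i].pgbase + sub[i].bytes - s.pgbase;
	}
	return s;
}

int main(void)
{
	struct span group[] = { { 1024, 512 }, { 0, 512 }, { 2048, 1024 } };
	struct span s = merge_group(group, 3);

	printf("pgbase=%u bytes=%u\n", s.pgbase, s.bytes);  /* 0, 3072 */
	return 0;
}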
456 * nfs_wait_on_request - Wait for a request to complete.
464 if (!test_bit(PG_BUSY, &req->wb_flags)) in nfs_wait_on_request()
466 set_bit(PG_CONTENDED2, &req->wb_flags); in nfs_wait_on_request()
468 return wait_on_bit_io(&req->wb_flags, PG_BUSY, in nfs_wait_on_request()
473 * nfs_unroll_locks - unlock all newly locked reqs and wait on @req
486 for (tmp = head->wb_this_page ; tmp != req; tmp = tmp->wb_this_page) { in nfs_unroll_locks()
487 if (!kref_read(&tmp->wb_kref)) in nfs_unroll_locks()
494 * nfs_page_group_lock_subreq - try to lock a subrequest
507 if (!kref_get_unless_zero(&subreq->wb_kref)) in nfs_page_group_lock_subreq()
524 * nfs_lock_and_join_requests - join all subreqs to the head req
533 * Returns a locked, referenced pointer to the head request - which after
540 struct inode *inode = folio->mapping->host; in nfs_lock_and_join_requests()
548 * reference to the whole page group - the group will not be destroyed in nfs_lock_and_join_requests()
563 if (head != folio->private) { in nfs_lock_and_join_requests()
572 removed = test_bit(PG_REMOVE, &head->wb_flags); in nfs_lock_and_join_requests()
575 for (subreq = head->wb_this_page; in nfs_lock_and_join_requests()
577 subreq = subreq->wb_this_page) { in nfs_lock_and_join_requests()
578 if (test_bit(PG_REMOVE, &subreq->wb_flags)) in nfs_lock_and_join_requests()
639 WARN_ON_ONCE(test_bit(PG_CLEAN, &req->wb_flags)); in nfs_page_async_flush()
642 ret = pgio->pg_error; in nfs_page_async_flush()
648 ret = pgio->pg_error; in nfs_page_async_flush()
654 if (wbc->sync_mode == WB_SYNC_NONE) in nfs_page_async_flush()
658 pgio->pg_error = 0; in nfs_page_async_flush()
660 nfs_add_stats(folio->mapping->host, in nfs_page_async_flush()
672 nfs_pageio_cond_complete(pgio, folio->index); in nfs_do_writepage()
683 struct inode *inode = folio->mapping->host; in nfs_writepage_locked()
696 struct writeback_control *wbc, void *data) in nfs_writepages_callback()
700 ret = nfs_do_writepage(folio, wbc, data); in nfs_writepages_callback()
711 int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc) in nfs_writepages()
713 struct inode *inode = mapping->host; in nfs_writepages()
716 unsigned int mntflags = NFS_SERVER(inode)->flags; in nfs_writepages()
722 if (wbc->sync_mode == WB_SYNC_NONE && nfss->write_congested) { in nfs_writepages()
723 err = wait_event_killable(nfss->write_congestion_wait, in nfs_writepages()
724 nfss->write_congested == 0); in nfs_writepages()
731 if (!(mntflags & NFS_MOUNT_WRITE_EAGER) || wbc->for_kupdate || in nfs_writepages()
732 wbc->for_background || wbc->for_sync || wbc->for_reclaim) { in nfs_writepages()
744 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, in nfs_writepages()
748 if (err == -EAGAIN && mntflags & NFS_MOUNT_SOFTERR) in nfs_writepages()
766 struct address_space *mapping = folio->mapping; in nfs_inode_add_request()
767 struct nfs_inode *nfsi = NFS_I(mapping->host); in nfs_inode_add_request()
769 WARN_ON_ONCE(req->wb_this_page != req); in nfs_inode_add_request()
773 spin_lock(&mapping->i_private_lock); in nfs_inode_add_request()
774 set_bit(PG_MAPPED, &req->wb_flags); in nfs_inode_add_request()
776 spin_unlock(&mapping->i_private_lock); in nfs_inode_add_request()
777 atomic_long_inc(&nfsi->nrequests); in nfs_inode_add_request()
778 /* this is a head request for a page group - mark it as having an in nfs_inode_add_request()
782 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags)); in nfs_inode_add_request()
783 kref_get(&req->wb_kref); in nfs_inode_add_request()
794 struct folio *folio = nfs_page_to_folio(req->wb_head); in nfs_inode_remove_request()
795 struct address_space *mapping = folio->mapping; in nfs_inode_remove_request()
797 spin_lock(&mapping->i_private_lock); in nfs_inode_remove_request()
800 clear_bit(PG_MAPPED, &req->wb_head->wb_flags); in nfs_inode_remove_request()
802 spin_unlock(&mapping->i_private_lock); in nfs_inode_remove_request()
805 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) { in nfs_inode_remove_request()
806 atomic_long_dec(&nfsi->nrequests); in nfs_inode_remove_request()
819 * nfs_request_add_commit_list_locked - add request to a commit list
828 * The caller must hold NFS_I(cinfo->inode)->commit_mutex, and the
835 set_bit(PG_CLEAN, &req->wb_flags); in nfs_request_add_commit_list_locked()
837 atomic_long_inc(&cinfo->mds->ncommit); in nfs_request_add_commit_list_locked()
842 * nfs_request_add_commit_list - add request to a commit list
850 * The caller must _not_ hold the cinfo->lock, but must be
856 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); in nfs_request_add_commit_list()
857 nfs_request_add_commit_list_locked(req, &cinfo->mds->list, cinfo); in nfs_request_add_commit_list()
858 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); in nfs_request_add_commit_list()
864 * nfs_request_remove_commit_list - Remove request from a commit list
872 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
878 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) in nfs_request_remove_commit_list()
881 atomic_long_dec(&cinfo->mds->ncommit); in nfs_request_remove_commit_list()
888 cinfo->inode = inode; in nfs_init_cinfo_from_inode()
889 cinfo->mds = &NFS_I(inode)->commit_info; in nfs_init_cinfo_from_inode()
890 cinfo->ds = pnfs_get_ds_info(inode); in nfs_init_cinfo_from_inode()
891 cinfo->dreq = NULL; in nfs_init_cinfo_from_inode()
892 cinfo->completion_ops = &nfs_commit_completion_ops; in nfs_init_cinfo_from_inode()
923 node_stat_mod_folio(folio, NR_WRITEBACK, -nr); in nfs_folio_clear_commit()
924 wb_stat_mod(&inode_to_bdi(folio->mapping->host)->wb, in nfs_folio_clear_commit()
925 WB_WRITEBACK, -nr); in nfs_folio_clear_commit()
933 if (test_bit(PG_CLEAN, &req->wb_flags)) { in nfs_clear_request_commit()
935 struct inode *inode = d_inode(ctx->dentry); in nfs_clear_request_commit()
937 mutex_lock(&NFS_I(inode)->commit_mutex); in nfs_clear_request_commit()
941 mutex_unlock(&NFS_I(inode)->commit_mutex); in nfs_clear_request_commit()
948 if (hdr->verf.committed == NFS_DATA_SYNC) in nfs_write_need_commit()
949 return hdr->lseg == NULL; in nfs_write_need_commit()
950 return hdr->verf.committed != NFS_FILE_SYNC; in nfs_write_need_commit()
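/*
 * Illustrative sketch, not from write.c: the three NFSv3 stability
 * levels behind nfs_write_need_commit() above.  Only NFS_FILE_SYNC
 * puts both data and metadata on stable storage; anything weaker
 * leaves the client holding pages that need a later COMMIT (for
 * NFS_DATA_SYNC the pNFS layout segment case is the exception).
 */
#include <stdbool.h>
#include <stdio.h>

enum stable_how { NFS_UNSTABLE = 0, NFS_DATA_SYNC = 1, NFS_FILE_SYNC = 2 };

static bool write_needs_commit(enum stable_how committed, bool has_lseg)
{
	if (committed == NFS_DATA_SYNC)
		return !has_lseg;	/* mirrors the lseg == NULL test */
	return committed != NFS_FILE_SYNC;
}

int main(void)
{
	printf("%d %d %d\n",
	       write_needs_commit(NFS_UNSTABLE, false),	  /* 1 */
	       write_needs_commit(NFS_DATA_SYNC, true),	  /* 0 */
	       write_needs_commit(NFS_FILE_SYNC, false)); /* 0 */
	return 0;
}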
955 nfs_io_completion_get(hdr->io_completion); in nfs_async_write_init()
963 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) in nfs_write_completion()
965 nfs_init_cinfo_from_inode(&cinfo, hdr->inode); in nfs_write_completion()
966 while (!list_empty(&hdr->pages)) { in nfs_write_completion()
967 struct nfs_page *req = nfs_list_entry(hdr->pages.next); in nfs_write_completion()
969 bytes += req->wb_bytes; in nfs_write_completion()
971 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && in nfs_write_completion()
972 (hdr->good_bytes < bytes)) { in nfs_write_completion()
973 trace_nfs_comp_error(hdr->inode, req, hdr->error); in nfs_write_completion()
975 hdr->error); in nfs_write_completion()
980 req->wb_nio = 0; in nfs_write_completion()
981 memcpy(&req->wb_verf, &hdr->verf.verifier, sizeof(req->wb_verf)); in nfs_write_completion()
982 nfs_mark_request_commit(req, hdr->lseg, &cinfo, in nfs_write_completion()
983 hdr->pgio_mirror_idx); in nfs_write_completion()
993 nfs_io_completion_put(hdr->io_completion); in nfs_write_completion()
994 hdr->release(hdr); in nfs_write_completion()
1000 return atomic_long_read(&cinfo->mds->ncommit); in nfs_reqs_to_commit()
1003 /* NFS_I(cinfo->inode)->commit_mutex held by caller */
1012 kref_get(&req->wb_kref); in nfs_scan_commit_list()
1018 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); in nfs_scan_commit_list()
1021 if ((ret == max) && !cinfo->dreq) in nfs_scan_commit_list()
1030 * nfs_scan_commit - Scan an inode for commit requests
1044 if (!atomic_long_read(&cinfo->mds->ncommit)) in nfs_scan_commit()
1046 mutex_lock(&NFS_I(cinfo->inode)->commit_mutex); in nfs_scan_commit()
1047 if (atomic_long_read(&cinfo->mds->ncommit) > 0) { in nfs_scan_commit()
1050 ret = nfs_scan_commit_list(&cinfo->mds->list, dst, in nfs_scan_commit()
1052 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret); in nfs_scan_commit()
1054 mutex_unlock(&NFS_I(cinfo->inode)->commit_mutex); in nfs_scan_commit()
1080 rqend = req->wb_offset + req->wb_bytes; in nfs_try_to_update_request()
1083 * the offsets are non-contiguous. in nfs_try_to_update_request()
1087 if (offset > rqend || end < req->wb_offset) in nfs_try_to_update_request()
1091 if (offset < req->wb_offset) { in nfs_try_to_update_request()
1092 req->wb_offset = offset; in nfs_try_to_update_request()
1093 req->wb_pgbase = offset; in nfs_try_to_update_request()
1096 req->wb_bytes = end - req->wb_offset; in nfs_try_to_update_request()
1098 req->wb_bytes = rqend - req->wb_offset; in nfs_try_to_update_request()
1099 req->wb_nio = 0; in nfs_try_to_update_request()
1109 error = nfs_wb_folio(folio->mapping->host, folio); in nfs_try_to_update_request()
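/*
 * Illustrative sketch, not from write.c: the coalescing rule used by
 * nfs_try_to_update_request() above.  A new write [offset, offset +
 * count) may be folded into an existing request only if the two ranges
 * overlap or touch; otherwise the caller flushes the old one first
 * (the kernel also updates wb_pgbase, omitted here).
 */
#include <stdbool.h>
#include <stdio.h>

struct wreq { unsigned long long offset; unsigned int bytes; };

static bool try_update(struct wreq *req, unsigned long long offset,
		       unsigned int count)
{
	unsigned long long end = offset + count;
	unsigned long long rqend = req->offset + req->bytes;

	if (offset > rqend || end < req->offset)
		return false;		/* disjoint: cannot merge */

	if (offset < req->offset)
		req->offset = offset;
	if (end > rqend)
		req->bytes = end - req->offset;
	else
		req->bytes = rqend - req->offset;
	return true;
}

int main(void)
{
	struct wreq r = { 4096, 1024 };

	try_update(&r, 5120, 512);		/* abuts the tail */
	printf("%llu+%u\n", r.offset, r.bytes);	/* 4096+1536 */
	return 0;
}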
1166 * due to the lack of an ACCESS-type call in NFSv2. in nfs_flush_incompatible()
1174 l_ctx = req->wb_lock_context; in nfs_flush_incompatible()
1178 !(list_empty_careful(&flctx->flc_posix) && in nfs_flush_incompatible()
1179 list_empty_careful(&flctx->flc_flock))) { in nfs_flush_incompatible()
1180 do_flush |= l_ctx->lockowner != current->files; in nfs_flush_incompatible()
1185 status = nfs_wb_folio(folio->mapping->host, folio); in nfs_flush_incompatible()
1194 * Returns -EACCES if the key will expire within RPC_KEY_EXPIRE_FAIL.
1206 !rcu_access_pointer(ctx->ll_cred)) in nfs_key_timeout_notify()
1208 return -EACCES; in nfs_key_timeout_notify()
1217 struct rpc_auth *auth = NFS_SERVER(inode)->client->cl_auth; in nfs_ctx_key_to_expire()
1220 .cred = ctx->cred, in nfs_ctx_key_to_expire()
1225 cred = rcu_dereference(ctx->ll_cred); in nfs_ctx_key_to_expire()
1226 if (cred && !(cred->cr_ops->crkey_timeout && in nfs_ctx_key_to_expire()
1227 cred->cr_ops->crkey_timeout(cred))) in nfs_ctx_key_to_expire()
1231 new = auth->au_ops->lookup_cred(auth, &acred, 0); in nfs_ctx_key_to_expire()
1239 } else if (new->cr_ops->crkey_timeout && in nfs_ctx_key_to_expire()
1240 new->cr_ops->crkey_timeout(new)) in nfs_ctx_key_to_expire()
1244 old = rcu_dereference_protected(xchg(&ctx->ll_cred, in nfs_ctx_key_to_expire()
1259 struct inode *inode = folio->mapping->host; in nfs_folio_write_uptodate()
1264 if (nfsi->cache_validity & in nfs_folio_write_uptodate()
1268 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags) && pagelen != 0) in nfs_folio_write_uptodate()
1271 if (nfsi->cache_validity & NFS_INO_INVALID_DATA && pagelen != 0) in nfs_folio_write_uptodate()
1279 return fl->fl_start == 0 && fl->fl_end == OFFSET_MAX && in is_whole_file_wrlock()
1298 unsigned int mntflags = NFS_SERVER(inode)->flags; in nfs_can_extend_write()
1302 if (file->f_flags & O_DSYNC) in nfs_can_extend_write()
1308 if (!flctx || (list_empty_careful(&flctx->flc_flock) && in nfs_can_extend_write()
1309 list_empty_careful(&flctx->flc_posix))) in nfs_can_extend_write()
1314 spin_lock(&flctx->flc_lock); in nfs_can_extend_write()
1315 if (!list_empty(&flctx->flc_posix)) { in nfs_can_extend_write()
1316 fl = list_first_entry(&flctx->flc_posix, struct file_lock, in nfs_can_extend_write()
1320 } else if (!list_empty(&flctx->flc_flock)) { in nfs_can_extend_write()
1321 fl = list_first_entry(&flctx->flc_flock, struct file_lock, in nfs_can_extend_write()
1326 spin_unlock(&flctx->flc_lock); in nfs_can_extend_write()
1340 struct address_space *mapping = folio->mapping; in nfs_update_folio()
1341 struct inode *inode = mapping->host; in nfs_update_folio()
1359 count = end - offset; in nfs_update_folio()
1364 nfs_set_pageerror(mapping); in nfs_update_folio()
1389 if (IS_SWAPFILE(hdr->inode)) in nfs_initiate_write()
1390 task_setup_data->flags |= RPC_TASK_SWAPPER; in nfs_initiate_write()
1391 task_setup_data->priority = priority; in nfs_initiate_write()
1392 rpc_ops->write_setup(hdr, msg, &task_setup_data->rpc_client); in nfs_initiate_write()
1405 req->wb_nio++; in nfs_redirty_request()
1407 atomic_long_inc(&nfsi->redirtied_pages); in nfs_redirty_request()
1417 req = nfs_list_entry(head->next); in nfs_async_write_error()
1428 nfs_async_write_error(&hdr->pages, 0); in nfs_async_write_reschedule_io()
1446 if (server->pnfs_curr_ld && !force_mds) in nfs_pageio_init_write()
1447 pg_ops = server->pnfs_curr_ld->pg_write_ops; in nfs_pageio_init_write()
1450 server->wsize, ioflags); in nfs_pageio_init_write()
1458 if (pgio->pg_ops && pgio->pg_ops->pg_cleanup) in nfs_pageio_reset_write_mds()
1459 pgio->pg_ops->pg_cleanup(pgio); in nfs_pageio_reset_write_mds()
1461 pgio->pg_ops = &nfs_pgio_rw_ops; in nfs_pageio_reset_write_mds()
1465 mirror = &pgio->pg_mirrors[0]; in nfs_pageio_reset_write_mds()
1466 mirror->pg_bsize = NFS_SERVER(pgio->pg_inode)->wsize; in nfs_pageio_reset_write_mds()
1473 struct nfs_commit_data *data = calldata; in nfs_commit_prepare()
1475 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data); in nfs_commit_prepare()
1481 struct nfs_pgio_args *argp = &hdr->args; in nfs_writeback_check_extend()
1482 struct nfs_pgio_res *resp = &hdr->res; in nfs_writeback_check_extend()
1483 u64 size = argp->offset + resp->count; in nfs_writeback_check_extend()
1485 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) in nfs_writeback_check_extend()
1486 fattr->size = size; in nfs_writeback_check_extend()
1487 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) { in nfs_writeback_check_extend()
1488 fattr->valid &= ~NFS_ATTR_FATTR_SIZE; in nfs_writeback_check_extend()
1491 if (size != fattr->size) in nfs_writeback_check_extend()
1496 fattr->valid |= NFS_ATTR_FATTR_SIZE; in nfs_writeback_check_extend()
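/*
 * Illustrative sketch, not from write.c, and only a loose re-creation:
 * the guard in nfs_writeback_check_extend() above.  A WRITE that ran to
 * offset + count implies the file is at least that large, so a post-op
 * size smaller than what the client already knows is treated as stale
 * rather than applied.  All names hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned long long trusted_size(unsigned long long write_offset,
				       unsigned int write_count,
				       unsigned long long postop_size,
				       unsigned long long i_size,
				       bool *postop_valid)
{
	unsigned long long end = write_offset + write_count;

	if (postop_size < i_size) {
		*postop_valid = false;	/* stale attribute: ignore it */
		return i_size > end ? i_size : end;
	}
	*postop_valid = true;
	return postop_size;
}

int main(void)
{
	bool ok;

	/* server reports 8 KiB, but we already extended to 16 KiB */
	printf("%llu\n", trusted_size(12288, 4096, 8192, 16384, &ok));
	return 0;	/* prints 16384 */
}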
1501 struct nfs_fattr *fattr = &hdr->fattr; in nfs_writeback_update_inode()
1502 struct inode *inode = hdr->inode; in nfs_writeback_update_inode()
1505 spin_lock(&inode->i_lock); in nfs_writeback_update_inode()
1507 spin_unlock(&inode->i_lock); in nfs_writeback_update_inode()
1511 spin_lock(&inode->i_lock); in nfs_writeback_update_inode()
1514 spin_unlock(&inode->i_lock); in nfs_writeback_update_inode()
1528 * ->write_done will attempt to use post-op attributes to detect in nfs_writeback_done()
1530 * of close-to-open would allow us to continue caching even if in nfs_writeback_done()
1534 status = NFS_PROTO(inode)->write_done(task, hdr); in nfs_writeback_done()
1538 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, hdr->res.count); in nfs_writeback_done()
1541 if (task->tk_status >= 0) { in nfs_writeback_done()
1542 enum nfs3_stable_how committed = hdr->res.verf->committed; in nfs_writeback_done()
1546 * We have some uncommitted data on the server at in nfs_writeback_done()
1550 set_bit(NFS_IOHDR_UNSTABLE_WRITES, &hdr->flags); in nfs_writeback_done()
1553 if (committed < hdr->args.stable) { in nfs_writeback_done()
1555 * commit data to stable storage even though we in nfs_writeback_done()
1568 NFS_SERVER(inode)->nfs_client->cl_hostname, in nfs_writeback_done()
1569 committed, hdr->args.stable); in nfs_writeback_done()
1577 spin_lock(&inode->i_lock); in nfs_writeback_done()
1579 spin_unlock(&inode->i_lock); in nfs_writeback_done()
1590 struct nfs_pgio_args *argp = &hdr->args; in nfs_writeback_result()
1591 struct nfs_pgio_res *resp = &hdr->res; in nfs_writeback_result()
1593 if (resp->count < argp->count) { in nfs_writeback_result()
1597 nfs_inc_stats(hdr->inode, NFSIOS_SHORTWRITE); in nfs_writeback_result()
1600 if (resp->count == 0) { in nfs_writeback_result()
1604 argp->count); in nfs_writeback_result()
1607 nfs_set_pgio_error(hdr, -EIO, argp->offset); in nfs_writeback_result()
1608 task->tk_status = -EIO; in nfs_writeback_result()
1612 /* For non-RPC-based layout drivers, retry through the MDS */ in nfs_writeback_result()
1613 if (!task->tk_ops) { in nfs_writeback_result()
1614 hdr->pnfs_error = -EAGAIN; in nfs_writeback_result()
1619 if (resp->verf->committed != NFS_UNSTABLE) { in nfs_writeback_result()
1621 hdr->mds_offset += resp->count; in nfs_writeback_result()
1622 argp->offset += resp->count; in nfs_writeback_result()
1623 argp->pgbase += resp->count; in nfs_writeback_result()
1624 argp->count -= resp->count; in nfs_writeback_result()
1629 argp->stable = NFS_FILE_SYNC; in nfs_writeback_result()
1631 resp->count = 0; in nfs_writeback_result()
1632 resp->verf->committed = 0; in nfs_writeback_result()
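/*
 * Illustrative sketch, not from write.c: the short-write resend
 * arithmetic in nfs_writeback_result() above.  When the server accepts
 * only part of a WRITE, the argument block is advanced past the bytes
 * that did land and the remainder is retransmitted.
 */
#include <stdio.h>

struct wargs { unsigned long long offset; unsigned int pgbase, count; };

/* returns bytes left to send, 0 when done, -1 for a zero-byte "success" */
static int advance_short_write(struct wargs *a, unsigned int resp_count)
{
	if (resp_count == 0)
		return -1;		/* the kernel turns this into -EIO */
	if (resp_count >= a->count) {
		a->count = 0;
		return 0;
	}
	a->offset += resp_count;	/* skip what the server took */
	a->pgbase += resp_count;
	a->count  -= resp_count;
	return (int)a->count;
}

int main(void)
{
	struct wargs a = { 0, 0, 8192 };

	printf("%d\n", advance_short_write(&a, 4096));	/* 4096 left */
	return 0;
}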
1639 return wait_var_event_killable(&cinfo->rpcs_out, in wait_on_commit()
1640 !atomic_read(&cinfo->rpcs_out)); in wait_on_commit()
1645 atomic_inc(&cinfo->rpcs_out); in nfs_commit_begin()
1650 if (atomic_dec_and_test(&cinfo->rpcs_out)) { in nfs_commit_end()
1651 wake_up_var(&cinfo->rpcs_out); in nfs_commit_end()
1657 void nfs_commitdata_release(struct nfs_commit_data *data) in nfs_commitdata_release()
1659 put_nfs_open_context(data->context); in nfs_commitdata_release()
1660 nfs_commit_free(data); in nfs_commitdata_release()
1664 int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data, in nfs_initiate_commit()
1673 .rpc_argp = &data->args, in nfs_initiate_commit()
1674 .rpc_resp = &data->res, in nfs_initiate_commit()
1675 .rpc_cred = data->cred, in nfs_initiate_commit()
1678 .task = &data->task, in nfs_initiate_commit()
1682 .callback_data = data, in nfs_initiate_commit()
1688 if (nfs_server_capable(data->inode, NFS_CAP_MOVEABLE)) in nfs_initiate_commit()
1692 nfs_ops->commit_setup(data, &msg, &task_setup_data.rpc_client); in nfs_initiate_commit()
1693 trace_nfs_initiate_commit(data); in nfs_initiate_commit()
1698 return nfs_local_commit(localio, data, call_ops, how); in nfs_initiate_commit()
1716 if (lwb < (req_offset(req) + req->wb_bytes)) in nfs_get_lwb()
1717 lwb = req_offset(req) + req->wb_bytes; in nfs_get_lwb()
1725 void nfs_init_commit(struct nfs_commit_data *data, in nfs_init_commit()
1735 * NB: take care not to mess about with data->commit et al. */ in nfs_init_commit()
1738 list_splice_init(head, &data->pages); in nfs_init_commit()
1740 first = nfs_list_entry(data->pages.next); in nfs_init_commit()
1742 inode = d_inode(ctx->dentry); in nfs_init_commit()
1744 data->inode = inode; in nfs_init_commit()
1745 data->cred = ctx->cred; in nfs_init_commit()
1746 data->lseg = lseg; /* reference transferred */ in nfs_init_commit()
1749 data->lwb = nfs_get_lwb(&data->pages); in nfs_init_commit()
1750 data->mds_ops = &nfs_commit_ops; in nfs_init_commit()
1751 data->completion_ops = cinfo->completion_ops; in nfs_init_commit()
1752 data->dreq = cinfo->dreq; in nfs_init_commit()
1754 data->args.fh = NFS_FH(data->inode); in nfs_init_commit()
1756 data->args.offset = 0; in nfs_init_commit()
1757 data->args.count = 0; in nfs_init_commit()
1758 data->context = get_nfs_open_context(ctx); in nfs_init_commit()
1759 data->res.fattr = &data->fattr; in nfs_init_commit()
1760 data->res.verf = &data->verf; in nfs_init_commit()
1761 nfs_fattr_init(&data->fattr); in nfs_init_commit()
1762 nfs_commit_begin(cinfo->mds); in nfs_init_commit()
1774 req = nfs_list_entry(page_list->next); in nfs_retry_commit()
1798 struct nfs_commit_data *data; in nfs_commit_list()
1806 data = nfs_commitdata_alloc(); in nfs_commit_list()
1807 if (!data) { in nfs_commit_list()
1808 nfs_retry_commit(head, NULL, cinfo, -1); in nfs_commit_list()
1809 return -ENOMEM; in nfs_commit_list()
1813 nfs_init_commit(data, head, NULL, cinfo); in nfs_commit_list()
1814 if (NFS_SERVER(inode)->nfs_client->cl_minorversion) in nfs_commit_list()
1817 localio = nfs_local_open_fh(NFS_SERVER(inode)->nfs_client, data->cred, in nfs_commit_list()
1818 data->args.fh, data->context->mode); in nfs_commit_list()
1819 return nfs_initiate_commit(NFS_CLIENT(inode), data, NFS_PROTO(inode), in nfs_commit_list()
1820 data->mds_ops, how, in nfs_commit_list()
1829 struct nfs_commit_data *data = calldata; in nfs_commit_done()
1831 /* Call the NFS version-specific code */ in nfs_commit_done()
1832 NFS_PROTO(data->inode)->commit_done(task, data); in nfs_commit_done()
1833 trace_nfs_commit_done(task, data); in nfs_commit_done()
1836 static void nfs_commit_release_pages(struct nfs_commit_data *data) in nfs_commit_release_pages()
1838 const struct nfs_writeverf *verf = data->res.verf; in nfs_commit_release_pages()
1840 int status = data->task.tk_status; in nfs_commit_release_pages()
1844 while (!list_empty(&data->pages)) { in nfs_commit_release_pages()
1845 req = nfs_list_entry(data->pages.next); in nfs_commit_release_pages()
1851 nfs_req_openctx(req)->dentry->d_sb->s_id, in nfs_commit_release_pages()
1852 (unsigned long long)NFS_FILEID(d_inode(nfs_req_openctx(req)->dentry)), in nfs_commit_release_pages()
1853 req->wb_bytes, in nfs_commit_release_pages()
1857 trace_nfs_commit_error(data->inode, req, in nfs_commit_release_pages()
1878 atomic_long_inc(&NFS_I(data->inode)->redirtied_pages); in nfs_commit_release_pages()
1885 nfs_init_cinfo(&cinfo, data->inode, data->dreq); in nfs_commit_release_pages()
1891 struct nfs_commit_data *data = calldata; in nfs_commit_release()
1893 data->completion_ops->completion(data); in nfs_commit_release()
1938 if (wbc && wbc->sync_mode == WB_SYNC_NONE) { in __nfs_commit_inode()
1939 if (nscan < wbc->nr_to_write) in __nfs_commit_inode()
1940 wbc->nr_to_write -= nscan; in __nfs_commit_inode()
1942 wbc->nr_to_write = 0; in __nfs_commit_inode()
1966 if (wbc->sync_mode == WB_SYNC_NONE) { in nfs_write_inode()
1968 if (!atomic_long_read(&nfsi->commit_info.ncommit)) in nfs_write_inode()
1971 /* Don't commit yet if this is a non-blocking flush and there in nfs_write_inode()
1972 * are a lot of outstanding writes for this mapping. in nfs_write_inode()
1974 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)) in nfs_write_inode()
1985 } else if (atomic_long_read(&nfsi->commit_info.ncommit)) in nfs_write_inode()
1989 if (!atomic_read(&nfsi->commit_info.rpcs_out)) in nfs_write_inode()
2000 * Needed for pNFS in order to ensure data becomes visible to the
2003 int nfs_filemap_write_and_wait_range(struct address_space *mapping, in nfs_filemap_write_and_wait_range()
2008 ret = filemap_write_and_wait_range(mapping, lstart, lend); in nfs_filemap_write_and_wait_range()
2010 ret = pnfs_sync_inode(mapping->host, true); in nfs_filemap_write_and_wait_range()
2024 ret = filemap_write_and_wait(inode->i_mapping); in nfs_wb_all()
2065 * nfs_wb_folio - Write back all requests on one page
2080 .range_end = range_start + len - 1, in nfs_wb_folio()
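/*
 * Illustrative sketch, not from write.c: nfs_wb_folio() above flushes
 * one folio by building a writeback_control whose byte range is exactly
 * that folio.  The range arithmetic, assuming 4 KiB pages and
 * hypothetical names:
 */
#include <stdio.h>

#define PAGE_SHIFT 12

struct byte_range { unsigned long long start, end; /* end is inclusive */ };

static struct byte_range folio_byte_range(unsigned long index,
					  unsigned int order)
{
	unsigned long long len = 1ULL << (PAGE_SHIFT + order);
	unsigned long long start = (unsigned long long)index << PAGE_SHIFT;

	return (struct byte_range){ start, start + len - 1 };
}

int main(void)
{
	/* a 16 KiB (order-2) folio at page index 24 */
	struct byte_range r = folio_byte_range(24, 2);

	printf("%llu-%llu\n", r.start, r.end);	/* 98304-114687 */
	return 0;
}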
2107 int nfs_migrate_folio(struct address_space *mapping, struct folio *dst, in nfs_migrate_folio()
2112 * an in-progress read or write request. Don't try to migrate it. in nfs_migrate_folio()
2119 return -EBUSY; in nfs_migrate_folio()
2123 return -EBUSY; in nfs_migrate_folio()
2127 return migrate_folio(mapping, dst, src, mode); in nfs_migrate_folio()
2138 return -ENOMEM; in nfs_init_writepagecache()
2173 nfs_congestion_kb = (16*int_sqrt(totalram_pages())) << (PAGE_SHIFT-10); in nfs_init_writepagecache()
2185 return -ENOMEM; in nfs_init_writepagecache()