xref: /linux/fs/netfs/buffered_write.c (revision 1a529af6f81e54f15df162a0c703459937941c54)
// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem high-level buffered write support.
 *
 * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */

#include <linux/export.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * Determined write method.  Adjust netfs_folio_traces if this is changed.
 */
enum netfs_how_to_modify {
	NETFS_FOLIO_IS_UPTODATE,	/* Folio is uptodate already */
	NETFS_JUST_PREFETCH,		/* We have to read the folio anyway */
	NETFS_WHOLE_FOLIO_MODIFY,	/* We're going to overwrite the whole folio */
	NETFS_MODIFY_AND_CLEAR,		/* We can assume there is no data to be downloaded. */
	NETFS_STREAMING_WRITE,		/* Store incomplete data in non-uptodate page. */
	NETFS_STREAMING_WRITE_CONT,	/* Continue streaming write. */
	NETFS_FLUSH_CONTENT,		/* Flush incompatible content. */
};

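/* Attach the given dirty-page group to a folio that has no group yet (or
 * that only bears the copy-to-cache marker); if no group is given, drop a
 * residual copy-to-cache marker instead.
 */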
static void netfs_set_group(struct folio *folio, struct netfs_group *netfs_group)
{
	void *priv = folio_get_private(folio);

	if (netfs_group && (!priv || priv == NETFS_FOLIO_COPY_TO_CACHE))
		folio_attach_private(folio, netfs_get_group(netfs_group));
	else if (!netfs_group && priv == NETFS_FOLIO_COPY_TO_CACHE)
		folio_detach_private(folio);
}

/*
 * Decide how we should modify a folio.  We might be attempting to do
 * write-streaming, in which case we don't want to do a local RMW cycle if we
 * can avoid it.  If we're doing local caching or content crypto, we give that
 * priority over avoiding RMW.  If the file is open for reading, then we also
 * assume that we may want to read what we wrote.
 */
static enum netfs_how_to_modify netfs_how_to_modify(struct netfs_inode *ctx,
						    struct file *file,
						    struct folio *folio,
						    void *netfs_group,
						    size_t flen,
						    size_t offset,
						    size_t len,
						    bool maybe_trouble)
{
	struct netfs_folio *finfo = netfs_folio_info(folio);
	struct netfs_group *group = netfs_folio_group(folio);
	loff_t pos = folio_pos(folio);

	_enter("");

	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE)
		return NETFS_FLUSH_CONTENT;

	if (folio_test_uptodate(folio))
		return NETFS_FOLIO_IS_UPTODATE;

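	/* Above the zero point there's no data on the server to download, so
	 * we can write into the folio and just clear the remainder.
	 */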
	if (pos >= ctx->zero_point)
		return NETFS_MODIFY_AND_CLEAR;

	if (!maybe_trouble && offset == 0 && len >= flen)
		return NETFS_WHOLE_FOLIO_MODIFY;

	if (file->f_mode & FMODE_READ)
		goto no_write_streaming;

	if (netfs_is_cache_enabled(ctx)) {
		/* We don't want to get a streaming write on a file that loses
		 * caching service temporarily because the backing store got
		 * culled.
		 */
		goto no_write_streaming;
	}

	if (!finfo)
		return NETFS_STREAMING_WRITE;

	/* We can continue a streaming write only if it continues on from the
	 * previous.  If it overlaps, we must flush lest we suffer a partial
	 * copy and disjoint dirty regions.
	 */
	if (offset == finfo->dirty_offset + finfo->dirty_len)
		return NETFS_STREAMING_WRITE_CONT;
	return NETFS_FLUSH_CONTENT;

no_write_streaming:
	if (finfo) {
		netfs_stat(&netfs_n_wh_wstream_conflict);
		return NETFS_FLUSH_CONTENT;
	}
	return NETFS_JUST_PREFETCH;
}

/*
 * Grab a folio for writing and lock it.  Attempt to allocate as large a folio
 * as possible to hold as much of the remaining length as possible in one go.
 */
static struct folio *netfs_grab_folio_for_write(struct address_space *mapping,
						loff_t pos, size_t part)
{
	pgoff_t index = pos / PAGE_SIZE;
	fgf_t fgp_flags = FGP_WRITEBEGIN;

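	/* Hint at a folio big enough to cover the misalignment plus the
	 * remaining data; the allocator may still fall back to a smaller
	 * order, in which case the caller retrims its copy length.
	 */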
	if (mapping_large_folio_support(mapping))
		fgp_flags |= fgf_set_order(pos % PAGE_SIZE + part);

	return __filemap_get_folio(mapping, index, fgp_flags,
				   mapping_gfp_mask(mapping));
}

/*
 * Update i_size and estimate the update to i_blocks to reflect the additional
 * data written into the pagecache until we can find out from the server what
 * the values actually are.
 */
static void netfs_update_i_size(struct netfs_inode *ctx, struct inode *inode,
				loff_t i_size, loff_t pos, size_t copied)
{
	blkcnt_t add;
	size_t gap;

	if (ctx->ops->update_i_size) {
		ctx->ops->update_i_size(inode, pos);
		return;
	}

	i_size_write(inode, pos);
#if IS_ENABLED(CONFIG_FSCACHE)
	fscache_update_cookie(ctx->cache, NULL, &pos);
#endif

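	/* Estimate the change to i_blocks: only the bytes extending beyond
	 * the sector containing the old EOF can add sectors, and the result
	 * is clamped so that we never overstate the file's backing size.
	 */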
	gap = SECTOR_SIZE - (i_size & (SECTOR_SIZE - 1));
	if (copied > gap) {
		add = DIV_ROUND_UP(copied - gap, SECTOR_SIZE);

		inode->i_blocks = min_t(blkcnt_t,
					DIV_ROUND_UP(pos, SECTOR_SIZE),
					inode->i_blocks + add);
	}
}

/**
 * netfs_perform_write - Copy data into the pagecache.
 * @iocb: The operation parameters
 * @iter: The source buffer
 * @netfs_group: Grouping for dirty pages (e.g. ceph snaps).
 *
 * Copy data into pagecache pages attached to the inode specified by @iocb.
 * The caller must hold appropriate inode locks.
 *
 * Dirty pages are tagged with a netfs_folio struct if they're not up to date
 * to indicate the range modified.  Dirty pages may also be tagged with a
 * netfs-specific grouping such that data from an old group gets flushed before
 * a new one is started.
 */
ssize_t netfs_perform_write(struct kiocb *iocb, struct iov_iter *iter,
			    struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct address_space *mapping = inode->i_mapping;
	struct netfs_inode *ctx = netfs_inode(inode);
	struct writeback_control wbc = {
		.sync_mode	= WB_SYNC_NONE,
		.for_sync	= true,
		.nr_to_write	= LONG_MAX,
		.range_start	= iocb->ki_pos,
		.range_end	= iocb->ki_pos + iter->count,
	};
	struct netfs_io_request *wreq = NULL;
	struct netfs_folio *finfo;
	struct folio *folio, *writethrough = NULL;
	enum netfs_how_to_modify howto;
	enum netfs_folio_trace trace;
	unsigned int bdp_flags = (iocb->ki_flags & IOCB_NOWAIT) ? BDP_ASYNC : 0;
	ssize_t written = 0, ret, ret2;
	loff_t i_size, pos = iocb->ki_pos, from, to;
	size_t max_chunk = mapping_max_folio_size(mapping);
	bool maybe_trouble = false;

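	/* For O_(D)SYNC or a writethrough-mode inode, flush any dirty data
	 * overlapping the write first, then write the new data through to
	 * the server as it is copied into the pagecache.
	 */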
	if (unlikely(test_bit(NETFS_ICTX_WRITETHROUGH, &ctx->flags) ||
		     iocb->ki_flags & (IOCB_DSYNC | IOCB_SYNC))
	    ) {
		wbc_attach_fdatawrite_inode(&wbc, mapping->host);

		ret = filemap_write_and_wait_range(mapping, pos, pos + iter->count);
		if (ret < 0) {
			wbc_detach_inode(&wbc);
			goto out;
		}

		wreq = netfs_begin_writethrough(iocb, iter->count);
		if (IS_ERR(wreq)) {
			wbc_detach_inode(&wbc);
			ret = PTR_ERR(wreq);
			wreq = NULL;
			goto out;
		}
		if (!is_sync_kiocb(iocb))
			wreq->iocb = iocb;
		netfs_stat(&netfs_n_wh_writethrough);
	} else {
		netfs_stat(&netfs_n_wh_buffered_write);
	}

	do {
		size_t flen;
		size_t offset;	/* Offset into pagecache folio */
		size_t part;	/* Bytes to write to folio */
		size_t copied;	/* Bytes copied from user */

		ret = balance_dirty_pages_ratelimited_flags(mapping, bdp_flags);
		if (unlikely(ret < 0))
			break;

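		/* Work out how much of the remaining write can go into a
		 * maximally-sized folio at this position; part may be trimmed
		 * again below once we see what size of folio we actually got.
		 */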
		offset = pos & (max_chunk - 1);
		part = min(max_chunk - offset, iov_iter_count(iter));

		/* Bring in the user pages that we will copy from _first_ lest
		 * we hit a nasty deadlock on copying from the same page as
		 * we're writing to, without it being marked uptodate.
		 *
		 * Not only is this an optimisation, but it is also required to
		 * check that the address is actually valid, when atomic
		 * usercopies are used below.
		 *
		 * We rely on the page being held onto long enough by the LRU
		 * that we can grab it below if this causes it to be read.
		 */
		ret = -EFAULT;
		if (unlikely(fault_in_iov_iter_readable(iter, part) == part))
			break;

		folio = netfs_grab_folio_for_write(mapping, pos, part);
		if (IS_ERR(folio)) {
			ret = PTR_ERR(folio);
			break;
		}

		flen = folio_size(folio);
		offset = pos & (flen - 1);
		part = min_t(size_t, flen - offset, part);

		/* Wait for writeback to complete.  The writeback engine owns
		 * the info in folio->private and may change it until it
		 * removes the WB mark.
		 */
		if (folio_get_private(folio) &&
		    folio_wait_writeback_killable(folio)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		if (signal_pending(current)) {
			ret = written ? -EINTR : -ERESTARTSYS;
			goto error_folio_unlock;
		}

		/* See if we need to prefetch the area we're going to modify.
		 * We need to do this before we get a lock on the folio in case
		 * there's more than one writer competing for the same cache
		 * block.
		 */
		howto = netfs_how_to_modify(ctx, file, folio, netfs_group,
					    flen, offset, part, maybe_trouble);
		_debug("howto %u", howto);
		switch (howto) {
		case NETFS_JUST_PREFETCH:
			ret = netfs_prefetch_for_write(file, folio, offset, part);
			if (ret < 0) {
				_debug("prefetch = %zd", ret);
				goto error_folio_unlock;
			}
			break;
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_WHOLE_FOLIO_MODIFY:
		case NETFS_STREAMING_WRITE_CONT:
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, 0, offset);
			break;
		case NETFS_STREAMING_WRITE:
			ret = -EIO;
			if (WARN_ON(folio_get_private(folio)))
				goto error_folio_unlock;
			break;
		case NETFS_FLUSH_CONTENT:
			trace_netfs_folio(folio, netfs_flush_content);
			from = folio_pos(folio);
			to = from + folio_size(folio) - 1;
			folio_unlock(folio);
			folio_put(folio);
			ret = filemap_write_and_wait_range(mapping, from, to);
			if (ret < 0)
				goto out;
			continue;
		}

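		/* Flush the data cache around the copy: before, in case the
		 * folio is also writably mapped into userspace; after, so
		 * that what we wrote is visible through such mappings.  The
		 * copy is atomic and may transfer less than requested if the
		 * user buffer turns out to fault after all.
		 */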
		if (mapping_writably_mapped(mapping))
			flush_dcache_folio(folio);

		copied = copy_folio_from_iter_atomic(folio, offset, part, iter);

		flush_dcache_folio(folio);

		/* Deal with a (partially) failed copy */
		if (copied == 0) {
			ret = -EFAULT;
			goto error_folio_unlock;
		}

		trace = (enum netfs_folio_trace)howto;
		switch (howto) {
		case NETFS_FOLIO_IS_UPTODATE:
		case NETFS_JUST_PREFETCH:
			netfs_set_group(folio, netfs_group);
			break;
		case NETFS_MODIFY_AND_CLEAR:
			zero_user_segment(&folio->page, offset + copied, flen);
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_WHOLE_FOLIO_MODIFY:
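			/* The copy came up short, so we can't mark the folio
			 * uptodate as planned.  Rewind the iterator, unlock
			 * and go round again; maybe_trouble stops us picking
			 * whole-folio modification a second time.
			 */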
			if (unlikely(copied < part)) {
				maybe_trouble = true;
				iov_iter_revert(iter, copied);
				copied = 0;
				folio_unlock(folio);
				goto retry;
			}
			netfs_set_group(folio, netfs_group);
			folio_mark_uptodate(folio);
			break;
		case NETFS_STREAMING_WRITE:
			if (offset == 0 && copied == flen) {
				netfs_set_group(folio, netfs_group);
				folio_mark_uptodate(folio);
				trace = netfs_streaming_filled_page;
				break;
			}
			finfo = kzalloc(sizeof(*finfo), GFP_KERNEL);
			if (!finfo) {
				iov_iter_revert(iter, copied);
				ret = -ENOMEM;
				goto error_folio_unlock;
			}
			finfo->netfs_group = netfs_get_group(netfs_group);
			finfo->dirty_offset = offset;
			finfo->dirty_len = copied;
			folio_attach_private(folio, (void *)((unsigned long)finfo |
							     NETFS_FOLIO_INFO));
			break;
		case NETFS_STREAMING_WRITE_CONT:
			finfo = netfs_folio_info(folio);
			finfo->dirty_len += copied;
			if (finfo->dirty_offset == 0 && finfo->dirty_len == flen) {
				if (finfo->netfs_group)
					folio_change_private(folio, finfo->netfs_group);
				else
					folio_detach_private(folio);
				folio_mark_uptodate(folio);
				kfree(finfo);
				trace = netfs_streaming_cont_filled_page;
			}
			break;
		default:
			WARN(true, "Unexpected modify type %u ix=%lx\n",
			     howto, folio->index);
			ret = -EIO;
			goto error_folio_unlock;
		}

		trace_netfs_folio(folio, trace);

		/* Update the inode size if we moved the EOF marker */
		pos += copied;
		i_size = i_size_read(inode);
		if (pos > i_size)
			netfs_update_i_size(ctx, inode, i_size, pos, copied);
		written += copied;

		if (likely(!wreq)) {
			folio_mark_dirty(folio);
			folio_unlock(folio);
		} else {
			netfs_advance_writethrough(wreq, &wbc, folio, copied,
						   offset + copied == flen,
						   &writethrough);
			/* Folio unlocked */
		}
	retry:
		folio_put(folio);
		folio = NULL;

		cond_resched();
	} while (iov_iter_count(iter));

out:
	if (likely(written) && ctx->ops->post_modify)
		ctx->ops->post_modify(inode);

	if (unlikely(wreq)) {
		ret2 = netfs_end_writethrough(wreq, &wbc, writethrough);
		wbc_detach_inode(&wbc);
		if (ret2 == -EIOCBQUEUED)
			return ret2;
		if (ret == 0)
			ret = ret2;
	}

	iocb->ki_pos += written;
	_leave(" = %zd [%zd]", written, ret);
	return written ? written : ret;

error_folio_unlock:
	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
EXPORT_SYMBOL(netfs_perform_write);

/**
 * netfs_buffered_write_iter_locked - write data to a file
 * @iocb:	IO state structure (file, offset, etc.)
 * @from:	iov_iter with data to write
 * @netfs_group: Grouping for dirty pages (e.g. ceph snaps).
 *
 * This function does all the work needed for actually writing data to a
 * file. It does all basic checks, removes SUID from the file, updates
 * modification times and then copies the data into the pagecache.
 *
 * The caller must hold appropriate locks around this function and have called
 * generic_write_checks() already.  The caller is also responsible for doing
 * any necessary syncing afterwards.
 *
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * A caller has to handle it. This is mainly due to the fact that we want to
 * avoid syncing under i_rwsem.
 *
 * Return:
 * * number of bytes written, even for truncated writes
 * * negative error code if no data has been written at all
 */
ssize_t netfs_buffered_write_iter_locked(struct kiocb *iocb, struct iov_iter *from,
					 struct netfs_group *netfs_group)
{
	struct file *file = iocb->ki_filp;
	ssize_t ret;

	trace_netfs_write_iter(iocb, from);

	ret = file_remove_privs(file);
	if (ret)
		return ret;

	ret = file_update_time(file);
	if (ret)
		return ret;

	return netfs_perform_write(iocb, from, netfs_group);
}
EXPORT_SYMBOL(netfs_buffered_write_iter_locked);

/**
 * netfs_file_write_iter - write data to a file
 * @iocb: IO state structure
 * @from: iov_iter with data to write
 *
 * Perform a write to a file, writing into the pagecache if possible and doing
 * an unbuffered write instead if not.
 *
 * Return:
 * * Negative error code if no data has been written at all or if
 *   vfs_fsync_range() failed for a synchronous write
 * * Number of bytes written, even for truncated writes
 */
ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
	struct netfs_inode *ictx = netfs_inode(inode);
	ssize_t ret;

	_enter("%llx,%zx,%llx", iocb->ki_pos, iov_iter_count(from), i_size_read(inode));

	if (!iov_iter_count(from))
		return 0;

	if ((iocb->ki_flags & IOCB_DIRECT) ||
	    test_bit(NETFS_ICTX_UNBUFFERED, &ictx->flags))
		return netfs_unbuffered_write_iter(iocb, from);

	ret = netfs_start_io_write(inode);
	if (ret < 0)
		return ret;

	ret = generic_write_checks(iocb, from);
	if (ret > 0)
		ret = netfs_buffered_write_iter_locked(iocb, from, NULL);
	netfs_end_io_write(inode);
	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
	return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);
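
/* For illustration: a network filesystem would typically plug the above in
 * as its ->write_iter() handler.  A minimal sketch (the "myfs" names are
 * hypothetical, not part of this file):
 *
 *	const struct file_operations myfs_file_operations = {
 *		.read_iter	= netfs_file_read_iter,
 *		.write_iter	= netfs_file_write_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */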

/*
 * Notification that a previously read-only page is about to become writable.
 * Note that the caller indicates a single page of a multipage folio.
 */
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf, struct netfs_group *netfs_group)
{
	struct netfs_group *group;
	struct folio *folio = page_folio(vmf->page);
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = file_inode(file);
	struct netfs_inode *ictx = netfs_inode(inode);
	vm_fault_t ret = VM_FAULT_RETRY;
	int err;

	_enter("%lx", folio->index);

	sb_start_pagefault(inode->i_sb);

	if (folio_lock_killable(folio) < 0)
		goto out;
	if (folio->mapping != mapping) {
		folio_unlock(folio);
		ret = VM_FAULT_NOPAGE;
		goto out;
	}

	if (folio_wait_writeback_killable(folio)) {
		ret = VM_FAULT_LOCKED;
		goto out;
	}

	/* Can we see a streaming write here? */
	if (WARN_ON(!folio_test_uptodate(folio))) {
		ret = VM_FAULT_SIGBUS | VM_FAULT_LOCKED;
		goto out;
	}

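	/* If the folio is already dirty on behalf of a different group, it
	 * must be flushed back to the server before it can be redirtied for
	 * the new group.
	 */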
	group = netfs_folio_group(folio);
	if (group != netfs_group && group != NETFS_FOLIO_COPY_TO_CACHE) {
		folio_unlock(folio);
		err = filemap_fdatawrite_range(mapping,
					       folio_pos(folio),
					       folio_pos(folio) + folio_size(folio));
		switch (err) {
		case 0:
			ret = VM_FAULT_RETRY;
			goto out;
		case -ENOMEM:
			ret = VM_FAULT_OOM;
			goto out;
		default:
			ret = VM_FAULT_SIGBUS;
			goto out;
		}
	}

	if (folio_test_dirty(folio))
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite_plus);
	else
		trace_netfs_folio(folio, netfs_folio_trace_mkwrite);
	netfs_set_group(folio, netfs_group);
	file_update_time(file);
	if (ictx->ops->post_modify)
		ictx->ops->post_modify(inode);
	ret = VM_FAULT_LOCKED;
out:
	sb_end_pagefault(inode->i_sb);
	return ret;
}
EXPORT_SYMBOL(netfs_page_mkwrite);
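
/* And for illustration, the fault path would be hooked up via a small
 * wrapper in the filesystem's vm_operations_struct (again with hypothetical
 * "myfs" names):
 *
 *	static vm_fault_t myfs_page_mkwrite(struct vm_fault *vmf)
 *	{
 *		return netfs_page_mkwrite(vmf, NULL);
 *	}
 */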