/* xref: /linux/fs/bcachefs/fs-io-buffered.c (revision c4101e55974cc7d835fbd2d8e01553a3f61e9e75) */
// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

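/*
 * Returns true if @bio can't accept another @len bytes: either every bvec
 * slot is in use, or adding @len would overflow the 32 bit bi_size counter.
 */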
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		if (!bio->bi_status) {
			folio_mark_uptodate(fi.folio);
		} else {
			folio_clear_uptodate(fi.folio);
			folio_set_error(fi.folio);
		}
		folio_unlock(fi.folio);
	}

	bio_put(bio);
}

struct readpages_iter {
	struct address_space	*mapping;
	unsigned		idx;
	folios			folios;
};

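/*
 * Pull every folio out of the readahead control and into iter->folios,
 * attaching bch_folio state to each one; on allocation failure the current
 * folio is handed back to the ractl so readahead can retry it later.
 */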
static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	struct folio *folio;

	*iter = (struct readpages_iter) { ractl->mapping };

	while ((folio = __readahead_folio(ractl))) {
		if (!bch2_folio_create(folio, GFP_KERNEL) ||
		    darray_push(&iter->folios, folio)) {
			bch2_folio_release(folio);
			ractl->_nr_pages += folio_nr_pages(folio);
			ractl->_index -= folio_nr_pages(folio);
			return iter->folios.nr ? 0 : -ENOMEM;
		}

		folio_put(folio);
	}

	return 0;
}

static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
	if (iter->idx >= iter->folios.nr)
		return NULL;
	return iter->folios.data[iter->idx];
}

static inline void readpage_iter_advance(struct readpages_iter *iter)
{
	iter->idx++;
}

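/*
 * Checksummed or compressed extents have to be read (and checksummed or
 * decompressed) in their entirety, so reading part of one costs about as much
 * as reading the whole thing - readahead uses this to decide whether to
 * extend the bio to cover the full extent.
 */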
static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}

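/*
 * Grow @bio towards @sectors_this_extent, first with folios queued up in the
 * readahead iterator and then, if @get_more is set, with folios newly
 * allocated and added to the page cache. Btree locks are dropped while
 * allocating, so the caller must check that the transaction was successfully
 * relocked via the return value.
 */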
static int readpage_bio_extend(struct btree_trans *trans,
			       struct readpages_iter *iter,
			       struct bio *bio,
			       unsigned sectors_this_extent,
			       bool get_more)
{
	/* Don't hold btree locks while allocating memory: */
	bch2_trans_unlock(trans);

	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		struct folio *folio = readpage_iter_peek(iter);
		int ret;

		if (folio) {
			readpage_iter_advance(iter);
		} else {
			pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;

			if (!get_more)
				break;

			folio = xa_load(&iter->mapping->i_pages, folio_offset);
			if (folio && !xa_is_value(folio))
				break;

			folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
			if (!folio)
				break;

			if (!__bch2_folio_create(folio, GFP_KERNEL)) {
				folio_put(folio);
				break;
			}

			ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
			if (ret) {
				__bch2_folio_release(folio);
				folio_put(folio);
				break;
			}

			folio_put(folio);
		}

		BUG_ON(folio_sector(folio) != bio_end_sector(bio));

		BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
	}

	return bch2_trans_relock(trans);
}

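/*
 * Core buffered read loop: walk the extents btree from the bio's current
 * sector, resolve reflink indirection, and issue a read for each extent
 * fragment until the bio has been filled. Transaction restarts resume from
 * the bio's current position.
 */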
static void bchfs_read(struct btree_trans *trans,
		       struct bch_read_bio *rbio,
		       subvol_inum inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	u32 snapshot;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	bch2_bkey_buf_init(&sk);
retry:
	bch2_trans_begin(trans);
	iter = (struct btree_iter) { NULL };

	ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     SPOS(inum.inum, rbio->bio.bi_iter.bi_sector, snapshot),
			     BTREE_ITER_SLOTS);
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors, offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		/*
		 * read_extent -> io_time_reset may cause a transaction restart
		 * without returning an error, we need to check for that here:
		 */
		ret = bch2_trans_relock(trans);
		if (ret)
			break;

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			break;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			break;

		k = bkey_i_to_s_c(sk.k);

		sectors = min(sectors, k.k->size - offset_into_extent);

		if (readpages_iter) {
			ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
						  extent_partial_reads_expensive(k));
			if (ret)
				break;
		}

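		/*
		 * Shrink bi_size to just this extent fragment so the read path
		 * only sees the part of the bio it should be filling; the swap
		 * is undone below before advancing to the next fragment.
		 */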
		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		bch2_bio_page_state_set(&rbio->bio, k);

		bch2_read_extent(trans, rbio, iter.pos,
				 data_btree, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);

		ret = btree_trans_too_many_iters(trans);
		if (ret)
			break;
	}
err:
	bch2_trans_iter_exit(trans, &iter);

	if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
		goto retry;

	if (ret) {
		bch_err_inum_offset_ratelimited(c,
				iter.pos.inode,
				iter.pos.offset << 9,
				"read error %i from btree lookup", ret);
		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}

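/*
 * Our ->readahead() address_space op: folios are pulled from the readahead
 * control into a readpages_iter, then submitted to bchfs_read() in bios of up
 * to BIO_MAX_VECS folios each.
 */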
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct btree_trans *trans = bch2_trans_get(c);
	struct folio *folio;
	struct readpages_iter readpages_iter;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	int ret = readpages_iter_init(&readpages_iter, ractl);
	if (ret)
		return;

	bch2_pagecache_add_get(inode);

	while ((folio = readpage_iter_peek(&readpages_iter))) {
		unsigned n = min_t(unsigned,
				   readpages_iter.folios.nr -
				   readpages_iter.idx,
				   BIO_MAX_VECS);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
						   GFP_KERNEL, &c->bio_read),
				  opts);

		readpage_iter_advance(&readpages_iter);

		rbio->bio.bi_iter.bi_sector = folio_sector(folio);
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

		bchfs_read(trans, rbio, inode_inum(inode),
			   &readpages_iter);
		bch2_trans_unlock(trans);
	}

	bch2_pagecache_add_put(inode);

	bch2_trans_put(trans);
	darray_exit(&readpages_iter.folios);
}

static void __bchfs_readfolio(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum, struct folio *folio)
{
	bch2_folio_create(folio, __GFP_NOFAIL);

	rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
	rbio->bio.bi_iter.bi_sector = folio_sector(folio);
	BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

	bch2_trans_run(c, (bchfs_read(trans, rbio, inum, NULL), 0));
}

static void bch2_read_single_folio_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

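/*
 * Synchronously read one folio, marking it uptodate on success; used by
 * ->read_folio() and by the buffered write paths when a write only partially
 * covers a folio that isn't in cache yet.
 */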
int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	struct bch_io_opts opts;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
			 opts);
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_folio_end_io;

	__bchfs_readfolio(c, rbio, inode_inum(inode), folio);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	folio_mark_uptodate(folio);
	return 0;
}

int bch2_read_folio(struct file *file, struct folio *folio)
{
	int ret;

	ret = bch2_read_single_folio(folio, folio->mapping);
	folio_unlock(folio);
	return bch2_err_class(ret);
}

/* writepages: */

struct bch_writepage_io {
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
	struct bch_folio_sector	*tmp;
	unsigned		tmp_sectors;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	struct bch_writepage_state ret = { 0 };

	bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
	return ret;
}

/*
 * Determine when a writepage io is full. We have to limit writepage bios to a
 * single page per bvec (i.e. 1MB with 4k pages) because that is the limit to
 * what the bounce path in bch2_write_extent() can handle. In theory we could
 * loosen this restriction for non-bounce I/O, but we don't have that context
 * here. Ideally, we can up this limit and make it configurable in the future
 * when the bounce path can be enhanced to accommodate larger source bios.
 */
static inline bool bch_io_full(struct bch_writepage_io *io, unsigned len)
{
	struct bio *bio = &io->op.wbio.bio;
	return bio_full(bio, len) ||
		(bio->bi_iter.bi_size + len > BIO_MAX_VECS * PAGE_SIZE);
}

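/*
 * Write completion: on error, or when the data was written inline in the
 * btree, clear the per-sector replica counts; then account i_sectors_delta
 * against the inode and end writeback on each folio once its write_count
 * drops to zero.
 */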
static void bch2_writepage_io_done(struct bch_write_op *op)
{
	struct bch_writepage_io *io =
		container_of(op, struct bch_writepage_io, op);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct folio_iter fi;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			folio_set_error(fi.folio);
			mapping_set_error(fi.folio->mapping, -EIO);

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	WARN_ON_ONCE(io->op.i_sectors_delta > 0);

	/*
	 * An error partway through a folio (e.g. the filesystem going
	 * read-only) can leave i_sectors_delta slightly off, which is why the
	 * stricter assertion is left disabled:
	 *
	 * BUG_ON(io->op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * PageWriteback is effectively our ref on the inode - fixup i_blocks
	 * before calling end_page_writeback:
	 */
	bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_folio_all(fi, bio) {
		struct bch_folio *s = __bch2_folio(fi.folio);

		if (atomic_dec_and_test(&s->write_count))
			folio_end_writeback(fi.folio);
	}

	bio_put(&io->op.wbio.bio);
}

static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, NULL);
}

/*
 * Allocate a new bch_writepage_io for @w and initialize its write op; the
 * caller appends folios to it and keeps reusing it until it's full or no
 * longer contiguous with the folio being written:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
					      REQ_OP_WRITE,
					      GFP_KERNEL,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	w->io->inode		= inode;
	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->subvol		= inode->ei_subvol;
	op->pos			= POS(inode->v.i_ino, sector);
	op->end_io		= bch2_writepage_io_done;
	op->devs_need_flush	= &inode->ei_devs_need_flush;
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}

static int __bch2_writepage(struct folio *folio,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_folio *s;
	unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	int ret;

	EBUG_ON(!folio_test_uptodate(folio));

	/* Is the folio fully inside i_size? */
	if (folio_end_pos(folio) <= i_size)
		goto do_io;

	/* Is the folio fully outside i_size? (truncate in progress) */
	if (folio_pos(folio) >= i_size) {
		folio_unlock(folio);
		return 0;
	}

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the folio size.  For a file that is not a multiple of
	 * the folio size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	folio_zero_segment(folio,
			   i_size - folio_pos(folio),
			   folio_size(folio));
do_io:
	f_sectors = folio_sectors(folio);
	s = bch2_folio(folio);

	if (f_sectors > w->tmp_sectors) {
		kfree(w->tmp);
		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), __GFP_NOFAIL);
		w->tmp_sectors = f_sectors;
	}

	/*
	 * Things get really hairy with errors during writeback:
	 */
	ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
	BUG_ON(ret);

	/* Before unlocking the page, get copy of reservations: */
	spin_lock(&s->lock);
	memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		bch2_folio_sector_set(folio, s, i, SECTOR_allocated);
	}
	spin_unlock(&s->lock);

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	folio_unlock(folio);

	offset = 0;
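	/*
	 * Walk the copied sector state, finding runs of contiguous dirty
	 * sectors and adding each run to a writepage io - starting a new io
	 * whenever the current one is full, discontiguous, or has a different
	 * replication factor:
	 */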
	while (1) {
		unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < f_sectors &&
		       w->tmp[offset].state < SECTOR_dirty)
			offset++;

		if (offset == f_sectors)
			break;

		while (offset + sectors < f_sectors &&
		       w->tmp[offset + sectors].state >= SECTOR_dirty) {
			reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
			dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
			sectors++;
		}
		BUG_ON(!sectors);

		sector = folio_sector(folio) + offset;

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bch_io_full(w->io, sectors << 9) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
				     sectors << 9, offset << 9));

		/* Check for writing past i_size: */
		WARN_ONCE((bio_end_sector(&w->io->op.wbio.bio) << 9) >
			  round_up(i_size, block_bytes(c)) &&
			  !test_bit(BCH_FS_emergency_ro, &c->flags),
			  "writing past i_size: %llu > %llu (unrounded %llu)\n",
			  bio_end_sector(&w->io->op.wbio.bio) << 9,
			  round_up(i_size, block_bytes(c)),
			  i_size);

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		folio_end_writeback(folio);

	return 0;
}

int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	kfree(w.tmp);
	return bch2_err_class(ret);
}

/* buffered writes: */

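/*
 * ->write_begin() address_space op: take the pagecache add lock, find or
 * create the folio, read it in if the write doesn't cover it entirely, and
 * get a disk reservation for the range being written. The reservation is
 * handed to ->write_end() via *fsdata.
 */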
int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct page **pagep, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res;
	struct folio *folio;
	unsigned offset;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_folio_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(inode);

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				FGP_LOCK|FGP_WRITE|FGP_CREAT|FGP_STABLE,
				mapping_gfp_mask(mapping));
	if (IS_ERR_OR_NULL(folio))
		goto err_unlock;

	offset = pos - folio_pos(folio);
	len = min_t(size_t, len, folio_end_pos(folio) - pos);

	if (folio_test_uptodate(folio))
		goto out;

	/* If we're writing entire folio, don't need to read it in first: */
	if (!offset && len == folio_size(folio))
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		folio_zero_segment(folio, len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}

	if (folio_pos(folio) >= inode->v.i_size) {
		folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}
readpage:
	ret = bch2_read_single_folio(folio, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
	if (ret)
		goto err;

	ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
	if (ret) {
		if (!folio_test_uptodate(folio)) {
			/*
			 * If the folio hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the folio is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*pagep = &folio->page;
	return 0;
err:
	folio_unlock(folio);
	folio_put(folio);
	*pagep = NULL;
err_unlock:
	bch2_pagecache_add_put(inode);
	kfree(res);
	*fsdata = NULL;
	return bch2_err_class(ret);
}

int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct page *page, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res = fsdata;
	struct folio *folio = page_folio(page);
	unsigned offset = pos - folio_pos(folio);

	lockdep_assert_held(&inode->v.i_rwsem);
	BUG_ON(offset + copied > folio_size(folio));

	if (unlikely(copied < len && !folio_test_uptodate(folio))) {
		/*
		 * The folio needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!folio_test_uptodate(folio))
			folio_mark_uptodate(folio);

		bch2_set_folio_dirty(c, inode, folio, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	folio_unlock(folio);
	folio_put(folio);
	bch2_pagecache_add_put(inode);

	bch2_folio_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}

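/* Unlock and put every folio in @fs from *@fi onwards: */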
static noinline void folios_trunc(folios *fs, struct folio **fi)
{
	while (fs->data + fs->nr > fi) {
		struct folio *f = darray_pop(fs);

		folio_unlock(f);
		folio_put(f);
	}
}

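/*
 * One pass of a buffered write: get and lock a contiguous range of folios,
 * read in any partially overwritten first/last folio, reserve space, copy
 * data in from the iov_iter, then mark the copied range dirty. Returns the
 * number of bytes copied, or an error if nothing was copied.
 */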
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation res;
	folios fs;
	struct folio *f;
	unsigned copied = 0, f_offset, f_copied;
	u64 end = pos + len, f_pos, f_len;
	loff_t last_folio_pos = inode->v.i_size;
	int ret = 0;

	BUG_ON(!len);

	bch2_folio_reservation_init(c, inode, &res);
	darray_init(&fs);

	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
				   FGP_LOCK|FGP_WRITE|FGP_STABLE|FGP_CREAT,
				   mapping_gfp_mask(mapping),
				   &fs);
	if (ret)
		goto out;

	BUG_ON(!fs.nr);

	f = darray_first(fs);
	if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
		ret = bch2_read_single_folio(f, mapping);
		if (ret)
			goto out;
	}

	f = darray_last(fs);
	end = min(end, folio_end_pos(f));
	last_folio_pos = folio_pos(f);
	if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
		if (end >= inode->v.i_size) {
			folio_zero_range(f, 0, folio_size(f));
		} else {
			ret = bch2_read_single_folio(f, mapping);
			if (ret)
				goto out;
		}
	}

	ret = bch2_folio_set(c, inode_inum(inode), fs.data, fs.nr);
	if (ret)
		goto out;

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;

		/*
		 * XXX: per POSIX and fstests generic/275, on -ENOSPC we're
		 * supposed to write as much as we have disk space for.
		 *
		 * On failure here we should still write out a partial page if
		 * we aren't completely out of disk space - we don't do that
		 * yet:
		 */
		ret = bch2_folio_reservation_get(c, inode, f, &res, f_offset, f_len);
		if (unlikely(ret)) {
			folios_trunc(&fs, fi);
			if (!fs.nr)
				goto out;

			end = min(end, folio_end_pos(darray_last(fs)));
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (mapping_writably_mapped(mapping))
		darray_for_each(fs, fi)
			flush_dcache_folio(*fi);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;
		f_copied = copy_page_from_iter_atomic(&f->page, f_offset, f_len, iter);
		if (!f_copied) {
			folios_trunc(&fs, fi);
			break;
		}

		if (!folio_test_uptodate(f) &&
		    f_copied != folio_size(f) &&
		    pos + copied + f_copied < inode->v.i_size) {
			iov_iter_revert(iter, f_copied);
			folio_zero_range(f, 0, folio_size(f));
			folios_trunc(&fs, fi);
			break;
		}

		flush_dcache_folio(f);
		copied += f_copied;

		if (f_copied != f_len) {
			folios_trunc(&fs, fi + 1);
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (!copied)
		goto out;

	end = pos + copied;

	spin_lock(&inode->v.i_lock);
	if (end > inode->v.i_size)
		i_size_write(&inode->v, end);
	spin_unlock(&inode->v.i_lock);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;

		if (!folio_test_uptodate(f))
			folio_mark_uptodate(f);

		bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	inode->ei_last_dirtied = (unsigned long) current;
out:
	darray_for_each(fs, fi) {
		folio_unlock(*fi);
		folio_put(*fi);
	}

	/*
	 * If the last folio added to the mapping starts beyond current EOF, we
	 * performed a short write but left around at least one post-EOF folio.
	 * Clean up the mapping before we return.
	 */
	if (last_folio_pos >= inode->v.i_size)
		truncate_pagecache(&inode->v, inode->v.i_size);

	darray_exit(&fs);
	bch2_folio_reservation_put(c, inode, &res);

	return copied ?: ret;
}

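/*
 * Buffered write loop: fault in the source pages, then feed chunks of the
 * iter to __bch2_buffered_write() until it's drained, falling back to a
 * single-segment copy whenever an atomic usercopy makes no progress.
 */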
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(inode);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = iov_iter_count(iter);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(inode);

	return written ? written : ret;
}

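/*
 * ->write_iter() file op: O_DIRECT writes are handed off to
 * bch2_direct_write(); otherwise take the inode lock, run the usual generic
 * checks, do the buffered write, and let generic_write_sync() handle
 * O_SYNC/O_DSYNC.
 */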
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = bch2_direct_write(iocb, from);
		goto out;
	}

	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
out:
	return bch2_err_class(ret);
}

void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
{
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
{
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_writepage_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */