// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

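/*
 * Private version of the block layer's bio_full(): true if the bio cannot
 * accept another @len bytes, either because every bvec slot is in use or
 * because bi_size would overflow:
 */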
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

/* readpage(s): */

static void bch2_readpages_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, bio->bi_status == BLK_STS_OK);

	bio_put(bio);
}

struct readpages_iter {
	struct address_space	*mapping;
	unsigned		idx;
	folios			folios;
};

static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	struct folio *folio;

	*iter = (struct readpages_iter) { ractl->mapping };

	while ((folio = __readahead_folio(ractl))) {
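		/*
		 * If we can't allocate folio state or can't grow the darray,
		 * hand the folio back to the readahead state so the core
		 * readahead code doesn't think we consumed it:
		 */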
		if (!bch2_folio_create(folio, GFP_KERNEL) ||
		    darray_push(&iter->folios, folio)) {
			bch2_folio_release(folio);
			ractl->_nr_pages += folio_nr_pages(folio);
			ractl->_index -= folio_nr_pages(folio);
			return iter->folios.nr ? 0 : -ENOMEM;
		}

		folio_put(folio);
	}

	return 0;
}

static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
	if (iter->idx >= iter->folios.nr)
		return NULL;
	return iter->folios.data[iter->idx];
}

static inline void readpage_iter_advance(struct readpages_iter *iter)
{
	iter->idx++;
}

static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}

static int readpage_bio_extend(struct btree_trans *trans,
			       struct readpages_iter *iter,
			       struct bio *bio,
			       unsigned sectors_this_extent,
			       bool get_more)
{
	/* Don't hold btree locks while allocating memory: */
	bch2_trans_unlock(trans);

	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		struct folio *folio = readpage_iter_peek(iter);
		int ret;

		if (folio) {
			readpage_iter_advance(iter);
		} else {
			pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;

			if (!get_more)
				break;
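			/*
			 * We've run out of folios from readahead; since the
			 * extent is expensive to read partially (checksummed
			 * or compressed - see the caller), allocate extra
			 * folios ourselves to cover the rest of it:
			 */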
			unsigned sectors_remaining = sectors_this_extent - bio_sectors(bio);

			if (sectors_remaining < PAGE_SECTORS << mapping_min_folio_order(iter->mapping))
				break;

			unsigned order = ilog2(rounddown_pow_of_two(sectors_remaining) / PAGE_SECTORS);

			/*
			 * Ensure proper alignment: a folio of order n must
			 * start at an index aligned to 1 << n (OR-ing in
			 * BIT(31) bounds __ffs() when folio_offset == 0):
			 */
			order = min(order, __ffs(folio_offset|BIT(31)));

			folio = xa_load(&iter->mapping->i_pages, folio_offset);
			if (folio && !xa_is_value(folio))
				break;

			folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), order);
			if (!folio)
				break;

			if (!__bch2_folio_create(folio, GFP_KERNEL)) {
				folio_put(folio);
				break;
			}

			ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
			if (ret) {
				__bch2_folio_release(folio);
				folio_put(folio);
				break;
			}

			folio_put(folio);
		}

		BUG_ON(folio_sector(folio) != bio_end_sector(bio));

		BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
	}

	return bch2_trans_relock(trans);
}

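/*
 * Fill a read bio from the extents btree: walk the extents covering the bio,
 * issuing one read per extent. Used both for readahead (@readpages_iter
 * non-NULL) and for single folio reads:
 */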
static void bchfs_read(struct btree_trans *trans,
		       struct bch_read_bio *rbio,
		       subvol_inum inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_buf sk;
	int flags = BCH_READ_retry_if_stale|
		BCH_READ_may_promote;
	int ret = 0;

	rbio->subvol = inum.subvol;

	bch2_bkey_buf_init(&sk);
	bch2_trans_begin(trans);
	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, rbio->bio.bi_iter.bi_sector),
			     BTREE_ITER_slots);
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors;
		s64 offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

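		/*
		 * Each iteration runs in its own transaction context; on a
		 * transaction restart we loop back here:
		 */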
		bch2_trans_begin(trans);

		u32 snapshot;
		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
		if (ret)
			goto err;

		bch2_btree_iter_set_snapshot(trans, &iter, snapshot);

		bch2_btree_iter_set_pos(trans, &iter,
				POS(inum.inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(trans, &iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			goto err;

		k = bkey_i_to_s_c(sk.k);

		sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent);

		if (readpages_iter) {
			ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
						  extent_partial_reads_expensive(k));
			if (ret)
				goto err;
		}

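		/*
		 * Temporarily shrink bi_iter.bi_size to just the part of the
		 * bio this extent covers; restored after the read is issued
		 * (see the landmine comment below):
		 */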
		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_last_fragment;

		bch2_bio_page_state_set(&rbio->bio, k);

		bch2_read_extent(trans, rbio, iter.pos,
				 data_btree, k, offset_into_extent, flags);
		/*
		 * Careful: there's a landmine here if bch2_read_extent() ever
		 * starts returning transaction restarts.
		 *
		 * We've changed rbio->bi_iter.bi_size to be "bytes we can read
		 * from this extent" with the swap call, and we restore it
		 * below. That restore needs to come before checking for
		 * errors.
		 *
		 * But unlike __bch2_read(), we use the rbio bvec iter, not one
		 * on the stack, so we can't do the restore right after the
		 * bch2_read_extent() call: we don't own that iterator anymore
		 * if BCH_READ_last_fragment is set, since we may have submitted
		 * that rbio instead of cloning it.
		 */

		if (flags & BCH_READ_last_fragment)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);
err:
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart))
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret) {
		struct printbuf buf = PRINTBUF;
		lockrestart_do(trans,
			bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter.pos.offset << 9));
		prt_printf(&buf, "read error %i from btree lookup", ret);
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);

		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}

void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct folio *folio;
	struct readpages_iter readpages_iter;
	struct blk_plug plug;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	int ret = readpages_iter_init(&readpages_iter, ractl);
	if (ret)
		return;

	/*
	 * Besides being a general performance optimization, plugging helps with
	 * avoiding btree transaction srcu warnings - submitting a bio can
	 * block, and we don't want to do that with the transaction locked.
	 *
	 * However, plugged bios are submitted when we schedule; ideally we
	 * would have our own scheduler hook to call unlock_long() before
	 * scheduling.
	 */
	blk_start_plug(&plug);
	bch2_pagecache_add_get(inode);

	struct btree_trans *trans = bch2_trans_get(c);
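	/*
	 * One rbio per batch: seed it with the next folio, then bchfs_read()
	 * grows it via readpage_bio_extend() as the extents allow:
	 */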
	while ((folio = readpage_iter_peek(&readpages_iter))) {
		unsigned n = min_t(unsigned,
				   readpages_iter.folios.nr -
				   readpages_iter.idx,
				   BIO_MAX_VECS);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
						   GFP_KERNEL, &c->bio_read),
				  c,
				  opts,
				  bch2_readpages_end_io);

		readpage_iter_advance(&readpages_iter);

		rbio->bio.bi_iter.bi_sector = folio_sector(folio);
		BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

		bchfs_read(trans, rbio, inode_inum(inode),
			   &readpages_iter);
		bch2_trans_unlock(trans);
	}
	bch2_trans_put(trans);

	bch2_pagecache_add_put(inode);
	blk_finish_plug(&plug);
	darray_exit(&readpages_iter.folios);
}

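/*
 * Synchronous read of a single folio, using an on-stack completion to wait
 * for the bio:
 */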
static void bch2_read_single_folio_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	struct bch_io_opts opts;
	struct blk_plug plug;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	BUG_ON(folio_test_uptodate(folio));
	BUG_ON(folio_test_dirty(folio));

	if (!bch2_folio_create(folio, GFP_KERNEL))
		return -ENOMEM;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
			 c,
			 opts,
			 bch2_read_single_folio_end_io);
	rbio->bio.bi_private = &done;
	rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
	rbio->bio.bi_iter.bi_sector = folio_sector(folio);
	BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

	blk_start_plug(&plug);
	bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0));
	blk_finish_plug(&plug);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	folio_mark_uptodate(folio);
	return 0;
}

int bch2_read_folio(struct file *file, struct folio *folio)
{
	int ret;

	ret = bch2_read_single_folio(folio, folio->mapping);
	folio_unlock(folio);
	return bch2_err_class(ret);
}

/* writepages: */

struct bch_writepage_io {
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

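/*
 * Per-writepages state: the write io currently being built up, per-inode io
 * options, and a scratch copy of the folio's per-sector state so it can still
 * be consulted after the folio has been unlocked:
 */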
struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
	struct bch_folio_sector	*tmp;
	unsigned		tmp_sectors;
	struct blk_plug		plug;
};

/*
 * Determine when a writepage io is full. We have to limit writepage bios to a
 * single page per bvec (i.e. 1MB with 4k pages) because that is the limit to
 * what the bounce path in bch2_write_extent() can handle. In theory we could
 * loosen this restriction for non-bounce I/O, but we don't have that context
 * here. Ideally, we can up this limit and make it configurable in the future
 * when the bounce path can be enhanced to accommodate larger source bios.
 */
static inline bool bch_io_full(struct bch_writepage_io *io, unsigned len)
{
	struct bio *bio = &io->op.wbio.bio;
	return bio_full(bio, len) ||
		(bio->bi_iter.bi_size + len > BIO_MAX_VECS * PAGE_SIZE);
}

static void bch2_writepage_io_done(struct bch_write_op *op)
{
	struct bch_writepage_io *io =
		container_of(op, struct bch_writepage_io, op);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct folio_iter fi;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			mapping_set_error(fi.folio->mapping, -EIO);

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_wrote_data_inline) {
		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	WARN_ON_ONCE(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	   BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * The writeback flag is effectively our ref on the inode -
	 * fixup i_blocks before calling folio_end_writeback:
	 */
	bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_folio_all(fi, bio) {
		struct bch_folio *s = __bch2_folio(fi.folio);

		if (atomic_dec_and_test(&s->write_count))
			folio_end_writeback(fi.folio);
	}

	bio_put(&io->op.wbio.bio);
}

static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, NULL);
}

/*
 * Allocate a new bch_writepage_io: the caller appends folios to the existing
 * one when possible, only calling this when it can't:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
					      REQ_OP_WRITE,
					      GFP_KERNEL,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	w->io->inode		= inode;
	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->subvol		= inode->ei_inum.subvol;
	op->pos			= POS(inode->v.i_ino, sector);
	op->end_io		= bch2_writepage_io_done;
	op->devs_need_flush	= &inode->ei_devs_need_flush;
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}

static int __bch2_writepage(struct folio *folio,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_folio *s;
	unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	int ret;

	EBUG_ON(!folio_test_uptodate(folio));

	/* Is the folio fully inside i_size? */
	if (folio_end_pos(folio) <= i_size)
		goto do_io;

	/* Is the folio fully outside i_size? (truncate in progress) */
	if (folio_pos(folio) >= i_size) {
		folio_unlock(folio);
		return 0;
	}

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the folio size.  For a file that is not a multiple of
	 * the folio size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	folio_zero_segment(folio,
			   i_size - folio_pos(folio),
			   folio_size(folio));
do_io:
	f_sectors = folio_sectors(folio);
	s = bch2_folio(folio);

	if (f_sectors > w->tmp_sectors) {
		kfree(w->tmp);
		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), GFP_NOFS|__GFP_NOFAIL);
		w->tmp_sectors = f_sectors;
	}

	/*
	 * Things get really hairy with errors during writeback:
	 */
	ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
	BUG_ON(ret);

	/* Before unlocking the page, get copy of reservations: */
	spin_lock(&s->lock);
	memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);

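	/*
	 * First pass: the number of replicas we write with is the minimum,
	 * over all dirty sectors, of replicas already allocated plus replicas
	 * reserved:
	 */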
	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		bch2_folio_sector_set(folio, s, i, SECTOR_allocated);
	}
	spin_unlock(&s->lock);

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	folio_unlock(folio);

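	/*
	 * Walk the folio in runs of contiguous dirty sectors, appending each
	 * run to the current writepage io - flushing it and starting a new
	 * one when it can't be appended to:
	 */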
	offset = 0;
	while (1) {
		unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < f_sectors &&
		       w->tmp[offset].state < SECTOR_dirty)
			offset++;

		if (offset == f_sectors)
			break;

		while (offset + sectors < f_sectors &&
		       w->tmp[offset + sectors].state >= SECTOR_dirty) {
			reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
			dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
			sectors++;
		}
		BUG_ON(!sectors);

		sector = folio_sector(folio) + offset;

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bch_io_full(w->io, sectors << 9) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
				     sectors << 9, offset << 9));

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		folio_end_writeback(folio);

	return 0;
}

int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state *w = kzalloc(sizeof(*w), GFP_NOFS|__GFP_NOFAIL);

	bch2_inode_opts_get(&w->opts, c, &to_bch_ei(mapping->host)->ei_inode);

	blk_start_plug(&w->plug);
	int ret = write_cache_pages(mapping, wbc, __bch2_writepage, w);
	if (w->io)
		bch2_writepage_do_io(w);
	blk_finish_plug(&w->plug);
	kfree(w->tmp);
	kfree(w);
	return bch2_err_class(ret);
}

/* buffered writes: */

int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct folio **foliop, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res;
	struct folio *folio;
	unsigned offset;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_folio_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(inode);

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				    FGP_WRITEBEGIN | fgf_set_order(len),
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		goto err_unlock;

	offset = pos - folio_pos(folio);
	len = min_t(size_t, len, folio_end_pos(folio) - pos);

	if (folio_test_uptodate(folio))
		goto out;

	/* If we're writing the entire folio, we don't need to read it in first: */
	if (!offset && len == folio_size(folio))
		goto out;

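	/*
	 * If the write starts at the folio and extends to or past EOF,
	 * everything after the write will be zeroed - no read needed.
	 * Likewise if the folio starts entirely past EOF:
	 */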
	if (!offset && pos + len >= inode->v.i_size) {
		folio_zero_segment(folio, len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}

	if (folio_pos(folio) >= inode->v.i_size) {
		folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}
readpage:
	ret = bch2_read_single_folio(folio, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
	if (ret)
		goto err;

	ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
	if (ret) {
		if (!folio_test_uptodate(folio)) {
			/*
			 * If the folio hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the folio is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*foliop = folio;
	return 0;
err:
	folio_unlock(folio);
	folio_put(folio);
err_unlock:
	bch2_pagecache_add_put(inode);
	kfree(res);
	*fsdata = NULL;
	return bch2_err_class(ret);
}

int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct folio *folio, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res = fsdata;
	unsigned offset = pos - folio_pos(folio);

	lockdep_assert_held(&inode->v.i_rwsem);
	BUG_ON(offset + copied > folio_size(folio));

	if (unlikely(copied < len && !folio_test_uptodate(folio))) {
		/*
		 * The folio needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!folio_test_uptodate(folio))
			folio_mark_uptodate(folio);

		bch2_set_folio_dirty(c, inode, folio, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	folio_unlock(folio);
	folio_put(folio);
	bch2_pagecache_add_put(inode);

	bch2_folio_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}

static noinline void folios_trunc(folios *fs, struct folio **fi)
{
	while (fs->data + fs->nr > fi) {
		struct folio *f = darray_pop(fs);

		folio_unlock(f);
		folio_put(f);
	}
}

static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation res;
	folios fs;
	struct folio *f;
	unsigned copied = 0, f_offset, f_copied;
	u64 end = pos + len, f_pos, f_len;
	loff_t last_folio_pos = inode->v.i_size;
	int ret = 0;

	BUG_ON(!len);

	bch2_folio_reservation_init(c, inode, &res);
	darray_init(&fs);

	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
					       FGP_WRITEBEGIN | fgf_set_order(len),
					       mapping_gfp_mask(mapping), &fs);
	if (ret)
		goto out;

	BUG_ON(!fs.nr);

	f = darray_first(fs);
	if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
		ret = bch2_read_single_folio(f, mapping);
		if (ret)
			goto out;
	}

	f = darray_last(fs);
	end = min(end, folio_end_pos(f));
	last_folio_pos = folio_pos(f);
	if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
		if (end >= inode->v.i_size) {
			folio_zero_range(f, 0, folio_size(f));
		} else {
			ret = bch2_read_single_folio(f, mapping);
			if (ret)
				goto out;
		}
	}

	ret = bch2_folio_set(c, inode_inum(inode), fs.data, fs.nr);
	if (ret)
		goto out;

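	/*
	 * First pass: get disk reservations. If we only got a partial
	 * reservation, trim the folio array so we only copy in as much as we
	 * reserved space for:
	 */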
	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		ssize_t f_reserved;

		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;
		f_reserved = bch2_folio_reservation_get_partial(c, inode, f, &res, f_offset, f_len);

		if (unlikely(f_reserved != f_len)) {
			if (f_reserved < 0) {
				if (f == darray_first(fs)) {
					ret = f_reserved;
					goto out;
				}

				folios_trunc(&fs, fi);
				end = min(end, folio_end_pos(darray_last(fs)));
			} else {
				if (!folio_test_uptodate(f)) {
					ret = bch2_read_single_folio(f, mapping);
					if (ret)
						goto out;
				}

				folios_trunc(&fs, fi + 1);
				end = f_pos + f_reserved;
			}

			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (mapping_writably_mapped(mapping))
		darray_for_each(fs, fi)
			flush_dcache_folio(*fi);

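	/*
	 * Second pass: copy the user data in. A short or faulting copy also
	 * trims the folio array, so we only dirty what was actually copied:
	 */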
	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;
		f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
		if (!f_copied) {
			folios_trunc(&fs, fi);
			break;
		}

		if (!folio_test_uptodate(f) &&
		    f_copied != folio_size(f) &&
		    pos + copied + f_copied < inode->v.i_size) {
			iov_iter_revert(iter, f_copied);
			folio_zero_range(f, 0, folio_size(f));
			folios_trunc(&fs, fi);
			break;
		}

		flush_dcache_folio(f);
		copied += f_copied;

		if (f_copied != f_len) {
			folios_trunc(&fs, fi + 1);
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (!copied)
		goto out;

	end = pos + copied;

	spin_lock(&inode->v.i_lock);
	if (end > inode->v.i_size)
		i_size_write(&inode->v, end);
	spin_unlock(&inode->v.i_lock);

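	/*
	 * Third pass: mark the folios uptodate and dirty for the range we
	 * actually copied:
	 */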
	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;

		if (!folio_test_uptodate(f))
			folio_mark_uptodate(f);

		bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	inode->ei_last_dirtied = (unsigned long) current;
out:
	darray_for_each(fs, fi) {
		folio_unlock(*fi);
		folio_put(*fi);
	}

	/*
	 * If the last folio added to the mapping starts beyond current EOF, we
	 * performed a short write but left around at least one post-EOF folio.
	 * Clean up the mapping before we return.
	 */
	if (last_folio_pos >= inode->v.i_size)
		truncate_pagecache(&inode->v, inode->v.i_size);

	darray_exit(&fs);
	bch2_folio_reservation_put(c, inode, &res);

	return copied ?: ret;
}

static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(inode);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = iov_iter_count(iter);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fall back here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(inode);

	return written ? written : ret;
}

ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = bch2_direct_write(iocb, from);
		goto out;
	}

	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
out:
	return bch2_err_class(ret);
}

void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
{
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
{
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_writepage_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */