// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_FS

#include "bcachefs.h"
#include "alloc_foreground.h"
#include "bkey_buf.h"
#include "fs-io.h"
#include "fs-io-buffered.h"
#include "fs-io-direct.h"
#include "fs-io-pagecache.h"
#include "io_read.h"
#include "io_write.h"

#include <linux/backing-dev.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>

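/*
 * Returns true if @bio can't take another @len bytes: either the bvec array
 * is full, or adding @len would overflow bi_size:
 */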
static inline bool bio_full(struct bio *bio, unsigned len)
{
	if (bio->bi_vcnt >= bio->bi_max_vecs)
		return true;
	if (bio->bi_iter.bi_size > UINT_MAX - len)
		return true;
	return false;
}

/* readpage(s): */

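/*
 * Read completion: mark each folio uptodate (on success) and unlock it, then
 * drop our ref on the bio:
 */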
static void bch2_readpages_end_io(struct bio *bio)
{
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio)
		folio_end_read(fi.folio, bio->bi_status == BLK_STS_OK);

	bio_put(bio);
}

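/*
 * The folios taken from the readahead_control, plus a cursor:
 * readpages_iter_init() detaches them up front so that bch_folio state can be
 * allocated for each before we start issuing reads:
 */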
struct readpages_iter {
	struct address_space	*mapping;
	unsigned		idx;
	folios			folios;
};

static int readpages_iter_init(struct readpages_iter *iter,
			       struct readahead_control *ractl)
{
	struct folio *folio;

	*iter = (struct readpages_iter) { ractl->mapping };

	while ((folio = __readahead_folio(ractl))) {
		if (!bch2_folio_create(folio, GFP_KERNEL) ||
		    darray_push(&iter->folios, folio)) {
			bch2_folio_release(folio);
			ractl->_nr_pages += folio_nr_pages(folio);
			ractl->_index -= folio_nr_pages(folio);
			return iter->folios.nr ? 0 : -ENOMEM;
		}

		folio_put(folio);
	}

	return 0;
}

static inline struct folio *readpage_iter_peek(struct readpages_iter *iter)
{
	if (iter->idx >= iter->folios.nr)
		return NULL;
	return iter->folios.data[iter->idx];
}

static inline void readpage_iter_advance(struct readpages_iter *iter)
{
	iter->idx++;
}

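/*
 * Checksummed or compressed extents can't be read partially - the entire
 * extent has to be read (and verified/decompressed) to get at any of it - so
 * reading additional folios into the same bio is nearly free:
 */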
static bool extent_partial_reads_expensive(struct bkey_s_c k)
{
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bch_extent_crc_unpacked crc;
	const union bch_extent_entry *i;

	bkey_for_each_crc(k.k, ptrs, crc, i)
		if (crc.csum_type || crc.compression_type)
			return true;
	return false;
}

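/*
 * Extend @bio towards @sectors_this_extent: first with folios from the
 * readahead iterator, then - if @get_more - with freshly allocated folios
 * inserted into the pagecache past the end of the readahead window:
 */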
static int readpage_bio_extend(struct btree_trans *trans,
			       struct readpages_iter *iter,
			       struct bio *bio,
			       unsigned sectors_this_extent,
			       bool get_more)
{
	/* Don't hold btree locks while allocating memory: */
	bch2_trans_unlock(trans);

	while (bio_sectors(bio) < sectors_this_extent &&
	       bio->bi_vcnt < bio->bi_max_vecs) {
		struct folio *folio = readpage_iter_peek(iter);
		int ret;

		if (folio) {
			readpage_iter_advance(iter);
		} else {
			pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT;

			if (!get_more)
				break;

			folio = xa_load(&iter->mapping->i_pages, folio_offset);
			if (folio && !xa_is_value(folio))
				break;

			folio = filemap_alloc_folio(readahead_gfp_mask(iter->mapping), 0);
			if (!folio)
				break;

			if (!__bch2_folio_create(folio, GFP_KERNEL)) {
				folio_put(folio);
				break;
			}

			ret = filemap_add_folio(iter->mapping, folio, folio_offset, GFP_KERNEL);
			if (ret) {
				__bch2_folio_release(folio);
				folio_put(folio);
				break;
			}

			folio_put(folio);
		}

		BUG_ON(folio_sector(folio) != bio_end_sector(bio));

		BUG_ON(!bio_add_folio(bio, folio, folio_size(folio), 0));
	}

	return bch2_trans_relock(trans);
}

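/*
 * Core buffered read path: walk the extents btree, restarting on transaction
 * restart, and issue a read for each extent overlapping @rbio, splitting the
 * bio at extent boundaries:
 */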
static void bchfs_read(struct btree_trans *trans,
		       struct bch_read_bio *rbio,
		       subvol_inum inum,
		       struct readpages_iter *readpages_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_buf sk;
	int flags = BCH_READ_RETRY_IF_STALE|
		BCH_READ_MAY_PROMOTE;
	int ret = 0;

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	bch2_bkey_buf_init(&sk);
	bch2_trans_begin(trans);
	bch2_trans_iter_init(trans, &iter, BTREE_ID_extents,
			     POS(inum.inum, rbio->bio.bi_iter.bi_sector),
			     BTREE_ITER_slots);
	while (1) {
		struct bkey_s_c k;
		unsigned bytes, sectors;
		s64 offset_into_extent;
		enum btree_id data_btree = BTREE_ID_extents;

		bch2_trans_begin(trans);

		u32 snapshot;
		ret = bch2_subvolume_get_snapshot(trans, inum.subvol, &snapshot);
		if (ret)
			goto err;

		bch2_btree_iter_set_snapshot(&iter, snapshot);

		bch2_btree_iter_set_pos(&iter,
				POS(inum.inum, rbio->bio.bi_iter.bi_sector));

		k = bch2_btree_iter_peek_slot(&iter);
		ret = bkey_err(k);
		if (ret)
			goto err;

		offset_into_extent = iter.pos.offset -
			bkey_start_offset(k.k);
		sectors = k.k->size - offset_into_extent;

		bch2_bkey_buf_reassemble(&sk, c, k);

		ret = bch2_read_indirect_extent(trans, &data_btree,
					&offset_into_extent, &sk);
		if (ret)
			goto err;

		k = bkey_i_to_s_c(sk.k);

		sectors = min_t(unsigned, sectors, k.k->size - offset_into_extent);

		if (readpages_iter) {
			ret = readpage_bio_extend(trans, readpages_iter, &rbio->bio, sectors,
						  extent_partial_reads_expensive(k));
			if (ret)
				goto err;
		}

		bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;
		swap(rbio->bio.bi_iter.bi_size, bytes);

		if (rbio->bio.bi_iter.bi_size == bytes)
			flags |= BCH_READ_LAST_FRAGMENT;

		bch2_bio_page_state_set(&rbio->bio, k);

		bch2_read_extent(trans, rbio, iter.pos,
				 data_btree, k, offset_into_extent, flags);

		if (flags & BCH_READ_LAST_FRAGMENT)
			break;

		swap(rbio->bio.bi_iter.bi_size, bytes);
		bio_advance(&rbio->bio, bytes);
err:
		if (ret &&
		    !bch2_err_matches(ret, BCH_ERR_transaction_restart))
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret) {
		struct printbuf buf = PRINTBUF;
		bch2_inum_offset_err_msg_trans(trans, &buf, inum, iter.pos.offset << 9);
		prt_printf(&buf, "read error %i from btree lookup", ret);
		bch_err_ratelimited(c, "%s", buf.buf);
		printbuf_exit(&buf);

		rbio->bio.bi_status = BLK_STS_IOERR;
		bio_endio(&rbio->bio);
	}

	bch2_bkey_buf_exit(&sk, c);
}

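/* The ->readahead address_space operation: */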
void bch2_readahead(struct readahead_control *ractl)
{
	struct bch_inode_info *inode = to_bch_ei(ractl->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_io_opts opts;
	struct folio *folio;
	struct readpages_iter readpages_iter;
	struct blk_plug plug;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	int ret = readpages_iter_init(&readpages_iter, ractl);
	if (ret)
		return;

	/*
	 * Besides being a general performance optimization, plugging helps with
	 * avoiding btree transaction srcu warnings - submitting a bio can
	 * block, and we don't want to do that with the transaction locked.
	 *
	 * However, plugged bios are submitted when we schedule; we ideally
	 * would have our own scheduler hook to call unlock_long() before
	 * scheduling.
	 */
	blk_start_plug(&plug);
	bch2_pagecache_add_get(inode);

	struct btree_trans *trans = bch2_trans_get(c);
	while ((folio = readpage_iter_peek(&readpages_iter))) {
		unsigned n = min_t(unsigned,
				   readpages_iter.folios.nr -
				   readpages_iter.idx,
				   BIO_MAX_VECS);
		struct bch_read_bio *rbio =
			rbio_init(bio_alloc_bioset(NULL, n, REQ_OP_READ,
						   GFP_KERNEL, &c->bio_read),
				  opts);

		readpage_iter_advance(&readpages_iter);

		rbio->bio.bi_iter.bi_sector = folio_sector(folio);
		rbio->bio.bi_end_io = bch2_readpages_end_io;
		BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

		bchfs_read(trans, rbio, inode_inum(inode),
			   &readpages_iter);
		bch2_trans_unlock(trans);
	}
	bch2_trans_put(trans);

	bch2_pagecache_add_put(inode);
	blk_finish_plug(&plug);
	darray_exit(&readpages_iter.folios);
}

static void bch2_read_single_folio_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

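/* Synchronously read a single folio, marking it uptodate on success: */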
int bch2_read_single_folio(struct folio *folio, struct address_space *mapping)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_read_bio *rbio;
	struct bch_io_opts opts;
	struct blk_plug plug;
	int ret;
	DECLARE_COMPLETION_ONSTACK(done);

	BUG_ON(folio_test_uptodate(folio));
	BUG_ON(folio_test_dirty(folio));

	if (!bch2_folio_create(folio, GFP_KERNEL))
		return -ENOMEM;

	bch2_inode_opts_get(&opts, c, &inode->ei_inode);

	rbio = rbio_init(bio_alloc_bioset(NULL, 1, REQ_OP_READ, GFP_KERNEL, &c->bio_read),
			 opts);
	rbio->bio.bi_private = &done;
	rbio->bio.bi_end_io = bch2_read_single_folio_end_io;

	rbio->bio.bi_opf = REQ_OP_READ|REQ_SYNC;
	rbio->bio.bi_iter.bi_sector = folio_sector(folio);
	BUG_ON(!bio_add_folio(&rbio->bio, folio, folio_size(folio), 0));

	blk_start_plug(&plug);
	bch2_trans_run(c, (bchfs_read(trans, rbio, inode_inum(inode), NULL), 0));
	blk_finish_plug(&plug);
	wait_for_completion(&done);

	ret = blk_status_to_errno(rbio->bio.bi_status);
	bio_put(&rbio->bio);

	if (ret < 0)
		return ret;

	folio_mark_uptodate(folio);
	return 0;
}

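/* The ->read_folio address_space operation: */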
int bch2_read_folio(struct file *file, struct folio *folio)
{
	int ret;

	ret = bch2_read_single_folio(folio, folio->mapping);
	folio_unlock(folio);
	return bch2_err_class(ret);
}

/* writepages: */

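/*
 * A single writeback bio, plus the write op that owns it; op must be last so
 * that the bio embedded in it (op.wbio.bio) sits at the end of the
 * allocation, where its inline bvecs can follow (see the writepage_bioset
 * init below):
 */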
struct bch_writepage_io {
	struct bch_inode_info		*inode;

	/* must be last: */
	struct bch_write_op		op;
};

struct bch_writepage_state {
	struct bch_writepage_io	*io;
	struct bch_io_opts	opts;
	struct bch_folio_sector	*tmp;
	unsigned		tmp_sectors;
};

static inline struct bch_writepage_state bch_writepage_state_init(struct bch_fs *c,
								  struct bch_inode_info *inode)
{
	struct bch_writepage_state ret = { 0 };

	bch2_inode_opts_get(&ret.opts, c, &inode->ei_inode);
	return ret;
}

/*
 * Determine when a writepage io is full. We have to limit writepage bios to a
 * single page per bvec (i.e. 1MB with 4k pages) because that is the limit to
 * what the bounce path in bch2_write_extent() can handle. In theory we could
 * loosen this restriction for non-bounce I/O, but we don't have that context
 * here. Ideally, we can up this limit and make it configurable in the future
 * when the bounce path can be enhanced to accommodate larger source bios.
 */
static inline bool bch_io_full(struct bch_writepage_io *io, unsigned len)
{
	struct bio *bio = &io->op.wbio.bio;
	return bio_full(bio, len) ||
		(bio->bi_iter.bi_size + len > BIO_MAX_VECS * PAGE_SIZE);
}

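/*
 * Writeback completion: on error, set the mapping error and zero per-sector
 * replica counts; then fix up i_sectors accounting, drop each folio's
 * write_count ref, and end writeback:
 */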
static void bch2_writepage_io_done(struct bch_write_op *op)
{
	struct bch_writepage_io *io =
		container_of(op, struct bch_writepage_io, op);
	struct bch_fs *c = io->op.c;
	struct bio *bio = &io->op.wbio.bio;
	struct folio_iter fi;
	unsigned i;

	if (io->op.error) {
		set_bit(EI_INODE_ERROR, &io->inode->ei_flags);

		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			mapping_set_error(fi.folio->mapping, -EIO);

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	if (io->op.flags & BCH_WRITE_WROTE_DATA_INLINE) {
		bio_for_each_folio_all(fi, bio) {
			struct bch_folio *s;

			s = __bch2_folio(fi.folio);
			spin_lock(&s->lock);
			for (i = 0; i < folio_sectors(fi.folio); i++)
				s->s[i].nr_replicas = 0;
			spin_unlock(&s->lock);
		}
	}

	/*
	 * racing with fallocate can cause us to add fewer sectors than
	 * expected - but we shouldn't add more sectors than expected:
	 */
	WARN_ON_ONCE(io->op.i_sectors_delta > 0);

	/*
	 * (error (due to going RO) halfway through a page can screw that up
	 * slightly)
	 * XXX wtf?
	   BUG_ON(io->op.op.i_sectors_delta >= PAGE_SECTORS);
	 */

	/*
	 * The writeback flag is effectively our ref on the inode -
	 * fixup i_blocks before calling folio_end_writeback:
	 */
	bch2_i_sectors_acct(c, io->inode, NULL, io->op.i_sectors_delta);

	bio_for_each_folio_all(fi, bio) {
		struct bch_folio *s = __bch2_folio(fi.folio);

		if (atomic_dec_and_test(&s->write_count))
			folio_end_writeback(fi.folio);
	}

	bio_put(&io->op.wbio.bio);
}

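/* Submit the currently-open writepage io and clear it from @w: */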
static void bch2_writepage_do_io(struct bch_writepage_state *w)
{
	struct bch_writepage_io *io = w->io;

	w->io = NULL;
	closure_call(&io->op.cl, bch2_write, NULL, NULL);
}

/*
 * Allocate a new bch_writepage_io for a write starting at @sector; the caller
 * (__bch2_writepage()) appends to an existing one when possible:
 */
static void bch2_writepage_io_alloc(struct bch_fs *c,
				    struct writeback_control *wbc,
				    struct bch_writepage_state *w,
				    struct bch_inode_info *inode,
				    u64 sector,
				    unsigned nr_replicas)
{
	struct bch_write_op *op;

	w->io = container_of(bio_alloc_bioset(NULL, BIO_MAX_VECS,
					      REQ_OP_WRITE,
					      GFP_KERNEL,
					      &c->writepage_bioset),
			     struct bch_writepage_io, op.wbio.bio);

	w->io->inode		= inode;
	op			= &w->io->op;
	bch2_write_op_init(op, c, w->opts);
	op->target		= w->opts.foreground_target;
	op->nr_replicas		= nr_replicas;
	op->res.nr_replicas	= nr_replicas;
	op->write_point		= writepoint_hashed(inode->ei_last_dirtied);
	op->subvol		= inode->ei_inum.subvol;
	op->pos			= POS(inode->v.i_ino, sector);
	op->end_io		= bch2_writepage_io_done;
	op->devs_need_flush	= &inode->ei_devs_need_flush;
	op->wbio.bio.bi_iter.bi_sector = sector;
	op->wbio.bio.bi_opf	= wbc_to_write_flags(wbc);
}

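/*
 * Write out one folio: compute the replica count for this write, mark dirty
 * sectors allocated, then chunk contiguous dirty ranges into writepage ios -
 * flushing the current io whenever the new range can't be appended to it:
 */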
static int __bch2_writepage(struct folio *folio,
			    struct writeback_control *wbc,
			    void *data)
{
	struct bch_inode_info *inode = to_bch_ei(folio->mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch_writepage_state *w = data;
	struct bch_folio *s;
	unsigned i, offset, f_sectors, nr_replicas_this_write = U32_MAX;
	loff_t i_size = i_size_read(&inode->v);
	int ret;

	EBUG_ON(!folio_test_uptodate(folio));

	/* Is the folio fully inside i_size? */
	if (folio_end_pos(folio) <= i_size)
		goto do_io;

	/* Is the folio fully outside i_size? (truncate in progress) */
	if (folio_pos(folio) >= i_size) {
		folio_unlock(folio);
		return 0;
	}

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the folio size.  For a file that is not a multiple of
	 * the folio size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	folio_zero_segment(folio,
			   i_size - folio_pos(folio),
			   folio_size(folio));
do_io:
	f_sectors = folio_sectors(folio);
	s = bch2_folio(folio);

	if (f_sectors > w->tmp_sectors) {
		kfree(w->tmp);
		w->tmp = kcalloc(f_sectors, sizeof(struct bch_folio_sector), GFP_NOFS|__GFP_NOFAIL);
		w->tmp_sectors = f_sectors;
	}

	/*
	 * Things get really hairy with errors during writeback:
	 */
	ret = bch2_get_folio_disk_reservation(c, inode, folio, false);
	BUG_ON(ret);

	/* Before unlocking the page, get copy of reservations: */
	spin_lock(&s->lock);
	memcpy(w->tmp, s->s, sizeof(struct bch_folio_sector) * f_sectors);

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		nr_replicas_this_write =
			min_t(unsigned, nr_replicas_this_write,
			      s->s[i].nr_replicas +
			      s->s[i].replicas_reserved);
	}

	for (i = 0; i < f_sectors; i++) {
		if (s->s[i].state < SECTOR_dirty)
			continue;

		s->s[i].nr_replicas = w->opts.compression
			? 0 : nr_replicas_this_write;

		s->s[i].replicas_reserved = 0;
		bch2_folio_sector_set(folio, s, i, SECTOR_allocated);
	}
	spin_unlock(&s->lock);

	BUG_ON(atomic_read(&s->write_count));
	atomic_set(&s->write_count, 1);

	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	folio_unlock(folio);

	offset = 0;
	while (1) {
		unsigned sectors = 0, dirty_sectors = 0, reserved_sectors = 0;
		u64 sector;

		while (offset < f_sectors &&
		       w->tmp[offset].state < SECTOR_dirty)
			offset++;

		if (offset == f_sectors)
			break;

		while (offset + sectors < f_sectors &&
		       w->tmp[offset + sectors].state >= SECTOR_dirty) {
			reserved_sectors += w->tmp[offset + sectors].replicas_reserved;
			dirty_sectors += w->tmp[offset + sectors].state == SECTOR_dirty;
			sectors++;
		}
		BUG_ON(!sectors);

		sector = folio_sector(folio) + offset;

		if (w->io &&
		    (w->io->op.res.nr_replicas != nr_replicas_this_write ||
		     bch_io_full(w->io, sectors << 9) ||
		     bio_end_sector(&w->io->op.wbio.bio) != sector))
			bch2_writepage_do_io(w);

		if (!w->io)
			bch2_writepage_io_alloc(c, wbc, w, inode, sector,
						nr_replicas_this_write);

		atomic_inc(&s->write_count);

		BUG_ON(inode != w->io->inode);
		BUG_ON(!bio_add_folio(&w->io->op.wbio.bio, folio,
				     sectors << 9, offset << 9));

		w->io->op.res.sectors += reserved_sectors;
		w->io->op.i_sectors_delta -= dirty_sectors;
		w->io->op.new_i_size = i_size;

		offset += sectors;
	}

	if (atomic_dec_and_test(&s->write_count))
		folio_end_writeback(folio);

	return 0;
}

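/* The ->writepages address_space operation, implemented via write_cache_pages(): */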
int bch2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
	struct bch_fs *c = mapping->host->i_sb->s_fs_info;
	struct bch_writepage_state w =
		bch_writepage_state_init(c, to_bch_ei(mapping->host));
	struct blk_plug plug;
	int ret;

	blk_start_plug(&plug);
	ret = write_cache_pages(mapping, wbc, __bch2_writepage, &w);
	if (w.io)
		bch2_writepage_do_io(&w);
	blk_finish_plug(&plug);
	kfree(w.tmp);
	return bch2_err_class(ret);
}

/* buffered writes: */

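/*
 * ->write_begin/->write_end address_space operations. The calling convention
 * is roughly (a sketch of the generic pattern, not exact VFS code):
 *
 *	bch2_write_begin(file, mapping, pos, len, &folio, &fsdata);
 *	... caller copies data into the returned, locked folio ...
 *	bch2_write_end(file, mapping, pos, len, copied, folio, fsdata);
 *
 * write_begin returns a locked folio with a space reservation stashed in
 * fsdata; write_end commits the copied bytes and releases whatever wasn't
 * used. Note that bcachefs's own buffered write path is
 * __bch2_buffered_write() below, which works on batches of folios:
 */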
int bch2_write_begin(struct file *file, struct address_space *mapping,
		     loff_t pos, unsigned len,
		     struct folio **foliop, void **fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res;
	struct folio *folio;
	unsigned offset;
	int ret = -ENOMEM;

	res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	bch2_folio_reservation_init(c, inode, res);
	*fsdata = res;

	bch2_pagecache_add_get(inode);

	folio = __filemap_get_folio(mapping, pos >> PAGE_SHIFT,
				    FGP_WRITEBEGIN | fgf_set_order(len),
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		goto err_unlock;

	offset = pos - folio_pos(folio);
	len = min_t(size_t, len, folio_end_pos(folio) - pos);

	if (folio_test_uptodate(folio))
		goto out;

	/* If we're writing entire folio, don't need to read it in first: */
	if (!offset && len == folio_size(folio))
		goto out;

	if (!offset && pos + len >= inode->v.i_size) {
		folio_zero_segment(folio, len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}

	if (folio_pos(folio) >= inode->v.i_size) {
		folio_zero_segments(folio, 0, offset, offset + len, folio_size(folio));
		flush_dcache_folio(folio);
		goto out;
	}
readpage:
	ret = bch2_read_single_folio(folio, mapping);
	if (ret)
		goto err;
out:
	ret = bch2_folio_set(c, inode_inum(inode), &folio, 1);
	if (ret)
		goto err;

	ret = bch2_folio_reservation_get(c, inode, folio, res, offset, len);
	if (ret) {
		if (!folio_test_uptodate(folio)) {
			/*
			 * If the folio hasn't been read in, we won't know if we
			 * actually need a reservation - we don't actually need
			 * to read here, we just need to check if the folio is
			 * fully backed by uncompressed data:
			 */
			goto readpage;
		}

		goto err;
	}

	*foliop = folio;
	return 0;
err:
	folio_unlock(folio);
	folio_put(folio);
err_unlock:
	bch2_pagecache_add_put(inode);
	kfree(res);
	*fsdata = NULL;
	return bch2_err_class(ret);
}

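/*
 * Complete a write_begin/write_end cycle: discard a short copy into a
 * non-uptodate folio (forcing userspace to redo it), update i_size, dirty the
 * folio, and release the reservation taken in bch2_write_begin():
 */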
int bch2_write_end(struct file *file, struct address_space *mapping,
		   loff_t pos, unsigned len, unsigned copied,
		   struct folio *folio, void *fsdata)
{
	struct bch_inode_info *inode = to_bch_ei(mapping->host);
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation *res = fsdata;
	unsigned offset = pos - folio_pos(folio);

	lockdep_assert_held(&inode->v.i_rwsem);
	BUG_ON(offset + copied > folio_size(folio));

	if (unlikely(copied < len && !folio_test_uptodate(folio))) {
		/*
		 * The folio needs to be read in, but that would destroy
		 * our partial write - simplest thing is to just force
		 * userspace to redo the write:
		 */
		folio_zero_range(folio, 0, folio_size(folio));
		flush_dcache_folio(folio);
		copied = 0;
	}

	spin_lock(&inode->v.i_lock);
	if (pos + copied > inode->v.i_size)
		i_size_write(&inode->v, pos + copied);
	spin_unlock(&inode->v.i_lock);

	if (copied) {
		if (!folio_test_uptodate(folio))
			folio_mark_uptodate(folio);

		bch2_set_folio_dirty(c, inode, folio, res, offset, copied);

		inode->ei_last_dirtied = (unsigned long) current;
	}

	folio_unlock(folio);
	folio_put(folio);
	bch2_pagecache_add_put(inode);

	bch2_folio_reservation_put(c, inode, res);
	kfree(res);

	return copied;
}

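/* Drop folios at and after @fi from @fs, unlocking and releasing each: */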
static noinline void folios_trunc(folios *fs, struct folio **fi)
{
	while (fs->data + fs->nr > fi) {
		struct folio *f = darray_pop(fs);

		folio_unlock(f);
		folio_put(f);
	}
}

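/*
 * The workhorse of the buffered write path: grab a contiguous batch of locked
 * folios covering [pos, pos + len), read in any partially-overwritten edge
 * folios, reserve space, copy from @iter, then mark everything dirty. On a
 * partial reservation or short copy, the folio batch is truncated and we
 * write what we can:
 */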
static int __bch2_buffered_write(struct bch_inode_info *inode,
				 struct address_space *mapping,
				 struct iov_iter *iter,
				 loff_t pos, unsigned len)
{
	struct bch_fs *c = inode->v.i_sb->s_fs_info;
	struct bch2_folio_reservation res;
	folios fs;
	struct folio *f;
	unsigned copied = 0, f_offset, f_copied;
	u64 end = pos + len, f_pos, f_len;
	loff_t last_folio_pos = inode->v.i_size;
	int ret = 0;

	BUG_ON(!len);

	bch2_folio_reservation_init(c, inode, &res);
	darray_init(&fs);

	ret = bch2_filemap_get_contig_folios_d(mapping, pos, end,
					       FGP_WRITEBEGIN | fgf_set_order(len),
					       mapping_gfp_mask(mapping), &fs);
	if (ret)
		goto out;

	BUG_ON(!fs.nr);

	f = darray_first(fs);
	if (pos != folio_pos(f) && !folio_test_uptodate(f)) {
		ret = bch2_read_single_folio(f, mapping);
		if (ret)
			goto out;
	}

	f = darray_last(fs);
	end = min(end, folio_end_pos(f));
	last_folio_pos = folio_pos(f);
	if (end != folio_end_pos(f) && !folio_test_uptodate(f)) {
		if (end >= inode->v.i_size) {
			folio_zero_range(f, 0, folio_size(f));
		} else {
			ret = bch2_read_single_folio(f, mapping);
			if (ret)
				goto out;
		}
	}

	ret = bch2_folio_set(c, inode_inum(inode), fs.data, fs.nr);
	if (ret)
		goto out;

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		ssize_t f_reserved;

		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;
		f_reserved = bch2_folio_reservation_get_partial(c, inode, f, &res, f_offset, f_len);

		if (unlikely(f_reserved != f_len)) {
			if (f_reserved < 0) {
				if (f == darray_first(fs)) {
					ret = f_reserved;
					goto out;
				}

				folios_trunc(&fs, fi);
				end = min(end, folio_end_pos(darray_last(fs)));
			} else {
				if (!folio_test_uptodate(f)) {
					ret = bch2_read_single_folio(f, mapping);
					if (ret)
						goto out;
				}

				folios_trunc(&fs, fi + 1);
				end = f_pos + f_reserved;
			}

			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (mapping_writably_mapped(mapping))
		darray_for_each(fs, fi)
			flush_dcache_folio(*fi);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;
		f_copied = copy_folio_from_iter_atomic(f, f_offset, f_len, iter);
		if (!f_copied) {
			folios_trunc(&fs, fi);
			break;
		}

		if (!folio_test_uptodate(f) &&
		    f_copied != folio_size(f) &&
		    pos + copied + f_copied < inode->v.i_size) {
			iov_iter_revert(iter, f_copied);
			folio_zero_range(f, 0, folio_size(f));
			folios_trunc(&fs, fi);
			break;
		}

		flush_dcache_folio(f);
		copied += f_copied;

		if (f_copied != f_len) {
			folios_trunc(&fs, fi + 1);
			break;
		}

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	if (!copied)
		goto out;

	end = pos + copied;

	spin_lock(&inode->v.i_lock);
	if (end > inode->v.i_size)
		i_size_write(&inode->v, end);
	spin_unlock(&inode->v.i_lock);

	f_pos = pos;
	f_offset = pos - folio_pos(darray_first(fs));
	darray_for_each(fs, fi) {
		f = *fi;
		f_len = min(end, folio_end_pos(f)) - f_pos;

		if (!folio_test_uptodate(f))
			folio_mark_uptodate(f);

		bch2_set_folio_dirty(c, inode, f, &res, f_offset, f_len);

		f_pos = folio_end_pos(f);
		f_offset = 0;
	}

	inode->ei_last_dirtied = (unsigned long) current;
out:
	darray_for_each(fs, fi) {
		folio_unlock(*fi);
		folio_put(*fi);
	}

	/*
	 * If the last folio added to the mapping starts beyond current EOF, we
	 * performed a short write but left around at least one post-EOF folio.
	 * Clean up the mapping before we return.
	 */
	if (last_folio_pos >= inode->v.i_size)
		truncate_pagecache(&inode->v, inode->v.i_size);

	darray_exit(&fs);
	bch2_folio_reservation_put(c, inode, &res);

	return copied ?: ret;
}

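/*
 * Loop over @iter, faulting in userspace pages before each copy to avoid
 * deadlocking on the folio we're copying to; if no progress is made, fall
 * back to a single-segment copy:
 */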
static ssize_t bch2_buffered_write(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct bch_inode_info *inode = file_bch_inode(file);
	loff_t pos = iocb->ki_pos;
	ssize_t written = 0;
	int ret = 0;

	bch2_pagecache_add_get(inode);

	do {
		unsigned offset = pos & (PAGE_SIZE - 1);
		unsigned bytes = iov_iter_count(iter);
again:
		/*
		 * Bring in the user page that we will copy from _first_.
		 * Otherwise there's a nasty deadlock on copying from the
		 * same page as we're writing to, without it being marked
		 * up-to-date.
		 *
		 * Not only is this an optimisation, but it is also required
		 * to check that the address is actually valid, when atomic
		 * usercopies are used, below.
		 */
		if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
			bytes = min_t(unsigned long, iov_iter_count(iter),
				      PAGE_SIZE - offset);

			if (unlikely(fault_in_iov_iter_readable(iter, bytes))) {
				ret = -EFAULT;
				break;
			}
		}

		if (unlikely(fatal_signal_pending(current))) {
			ret = -EINTR;
			break;
		}

		ret = __bch2_buffered_write(inode, mapping, iter, pos, bytes);
		if (unlikely(ret < 0))
			break;

		cond_resched();

		if (unlikely(ret == 0)) {
			/*
			 * If we were unable to copy any data at all, we must
			 * fall back to a single segment length write.
			 *
			 * If we didn't fallback here, we could livelock
			 * because not all segments in the iov can be copied at
			 * once without a pagefault.
			 */
			bytes = min_t(unsigned long, PAGE_SIZE - offset,
				      iov_iter_single_seg_count(iter));
			goto again;
		}
		pos += ret;
		written += ret;
		ret = 0;

		balance_dirty_pages_ratelimited(mapping);
	} while (iov_iter_count(iter));

	bch2_pagecache_add_put(inode);

	return written ? written : ret;
}

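/* The ->write_iter file operation: dispatch to the O_DIRECT or buffered path: */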
ssize_t bch2_write_iter(struct kiocb *iocb, struct iov_iter *from)
{
	struct file *file = iocb->ki_filp;
	struct bch_inode_info *inode = file_bch_inode(file);
	ssize_t ret;

	if (iocb->ki_flags & IOCB_DIRECT) {
		ret = bch2_direct_write(iocb, from);
		goto out;
	}

	inode_lock(&inode->v);

	ret = generic_write_checks(iocb, from);
	if (ret <= 0)
		goto unlock;

	ret = file_remove_privs(file);
	if (ret)
		goto unlock;

	ret = file_update_time(file);
	if (ret)
		goto unlock;

	ret = bch2_buffered_write(iocb, from);
	if (likely(ret > 0))
		iocb->ki_pos += ret;
unlock:
	inode_unlock(&inode->v);

	if (ret > 0)
		ret = generic_write_sync(iocb, ret);
out:
	return bch2_err_class(ret);
}

void bch2_fs_fs_io_buffered_exit(struct bch_fs *c)
{
	bioset_exit(&c->writepage_bioset);
}

int bch2_fs_fs_io_buffered_init(struct bch_fs *c)
{
	if (bioset_init(&c->writepage_bioset,
			4, offsetof(struct bch_writepage_io, op.wbio.bio),
			BIOSET_NEED_BVECS))
		return -BCH_ERR_ENOMEM_writepage_bioset_init;

	return 0;
}

#endif /* NO_BCACHEFS_FS */